author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-12-09 12:34:09 -0800
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-12-09 12:34:09 -0800
commit    52921c0b55cbd95e88921b97133f0c473b1d38d4 (patch)
tree      82d49b30a2b24cdf37bed0c7d0e0bd97a033c7e1
parent    8a505d40d2922f05bbd33020fb6e6da071bed431 (diff)
download  linux-3.10-52921c0b55cbd95e88921b97133f0c473b1d38d4.tar.gz
          linux-3.10-52921c0b55cbd95e88921b97133f0c473b1d38d4.tar.bz2
          linux-3.10-52921c0b55cbd95e88921b97133f0c473b1d38d4.zip
LTTng 2.3.4
This adds a working LTTng to the tree, and brings it up to date with the latest stable release, 2.3.4, from the upstream LTTng repo.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  patches.lttng/0000-lttng-lib-lttng-priority-heap.patch                          344
-rw-r--r--  patches.lttng/0001-lttng-lib-ring-buffer.patch                                 6804
-rw-r--r--  patches.lttng/0002-lttng-lib-portable-bitfield-read-write-header.patch          421
-rw-r--r--  patches.lttng/0003-lttng-BUILD_RUNTIME_BUG_ON.patch                              50
-rw-r--r--  patches.lttng/0004-lttng-offset-alignment-header.patch                           82
-rw-r--r--  patches.lttng/0005-lttng-libs-add-Makefile.patch                                 32
-rw-r--r--  patches.lttng/0006-lttng-wrappers.patch                                         625
-rw-r--r--  patches.lttng/0007-lttng-instrumentation-tracepoint-events.patch               3227
-rw-r--r--  patches.lttng/0008-lttng-syscall-instrumentation.patch                         7758
-rw-r--r--  patches.lttng/0009-lttng-lib-ring-buffer-clients.patch                         1106
-rw-r--r--  patches.lttng/0010-lttng-tracer-control-and-core-structures.patch              1812
-rw-r--r--  patches.lttng/0011-lttng-dynamically-selectable-context-information.patch      1131
-rw-r--r--  patches.lttng/0012-lttng-timing-calibration-feature.patch                        54
-rw-r--r--  patches.lttng/0013-lttng-debugfs-and-procfs-ABI.patch                           965
-rw-r--r--  patches.lttng/0014-lttng-Add-documentation-and-TODO-files.patch                 249
-rw-r--r--  patches.lttng/0015-lttng-add-system-call-instrumentation-probe.patch            459
-rw-r--r--  patches.lttng/0016-lttng-probe-callbacks.patch                                 2035
-rw-r--r--  patches.lttng/0017-lttng-toplevel-Makefile-and-Kconfig.patch                     97
-rw-r--r--  patches.lttng/0018-staging-add-LTTng-to-build.patch                              28
-rw-r--r--  patches.lttng/0019-staging-Add-LTTng-entry-to-MAINTAINERS-file.patch             27
-rw-r--r--  patches.lttng/0069-lttng-lib-ring-buffer-remove-stale-null-pointer.patch         63
-rw-r--r--  patches.lttng/0070-lttng-lib-ring-buffer-remove-duplicate-null-pointer.patch     60
-rw-r--r--  patches.lttng/0071-lttng-lib-ring-buffer-move-null-pointer-check-to-ope.patch    75
-rw-r--r--  patches.lttng/0072-lttng-wrapper-add-missing-include-to-kallsyms-wrappe.patch    29
-rw-r--r--  patches.lttng/0073-staging-lttng-cleanup-one-bit-signed-bitfields.patch         209
-rw-r--r--  patches.lttng/0172-staging-lttng-Fix-recent-modifications-to-string_fro.patch    42
-rw-r--r--  patches.lttng/0173-staging-lttng-TODO-update-lttng-reported-to-work-fin.patch    37
-rw-r--r--  patches.lttng/0174-staging-lttng-Update-max-symbol-length-to-256.patch           30
-rw-r--r--  patches.lttng/lttng-2.3.4.patch                                               69694
-rw-r--r--  patches.lttng/lttng-fix-module-name-lttng-relay.ko-lttng-tracer.ko.patch         55
-rw-r--r--  patches.lttng/lttng-fix-reference-to-obsolete-rt-kconfig-variable.patch           38
-rw-r--r--  patches.lttng/lttng-update-2.0.1-to-2.0.4.patch                                  204
-rw-r--r--  patches.lttng/lttng-update-to-v2.0.1.patch                                     14289
-rw-r--r--  series                                                                            38
34 files changed, 69698 insertions, 42471 deletions
diff --git a/patches.lttng/0000-lttng-lib-lttng-priority-heap.patch b/patches.lttng/0000-lttng-lib-lttng-priority-heap.patch
deleted file mode 100644
index 2c7aa683d20..00000000000
--- a/patches.lttng/0000-lttng-lib-lttng-priority-heap.patch
+++ /dev/null
@@ -1,344 +0,0 @@
-From 1b4d28b622fd88745fc020dd3e363e586e4f9943 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:08 -0500
-Subject: lttng lib: lttng priority heap
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-
-diff --git a/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.c b/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.c
-new file mode 100644
-index 0000000..2fce143
---- /dev/null
-+++ b/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.c
-@@ -0,0 +1,207 @@
-+/*
-+ * lttng_prio_heap.c
-+ *
-+ * Priority heap containing pointers. Based on CLRS, chapter 6.
-+ *
-+ * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to deal
-+ * in the Software without restriction, including without limitation the rights
-+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+ * copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ */
-+
-+#include <linux/slab.h>
-+#include "lttng_prio_heap.h"
-+
-+#ifdef DEBUG_HEAP
-+void lttng_check_heap(const struct lttng_ptr_heap *heap)
-+{
-+ size_t i;
-+
-+ if (!heap->len)
-+ return;
-+
-+ for (i = 1; i < heap->len; i++)
-+ WARN_ON_ONCE(!heap->gt(heap->ptrs[i], heap->ptrs[0]));
-+}
-+#endif
-+
-+static
-+size_t parent(size_t i)
-+{
-+ return (i -1) >> 1;
-+}
-+
-+static
-+size_t left(size_t i)
-+{
-+ return (i << 1) + 1;
-+}
-+
-+static
-+size_t right(size_t i)
-+{
-+ return (i << 1) + 2;
-+}
-+
-+/*
-+ * Copy of heap->ptrs pointer is invalid after heap_grow.
-+ */
-+static
-+int heap_grow(struct lttng_ptr_heap *heap, size_t new_len)
-+{
-+ void **new_ptrs;
-+
-+ if (heap->alloc_len >= new_len)
-+ return 0;
-+
-+ heap->alloc_len = max_t(size_t, new_len, heap->alloc_len << 1);
-+ new_ptrs = kmalloc(heap->alloc_len * sizeof(void *), heap->gfpmask);
-+ if (!new_ptrs)
-+ return -ENOMEM;
-+ if (heap->ptrs)
-+ memcpy(new_ptrs, heap->ptrs, heap->len * sizeof(void *));
-+ kfree(heap->ptrs);
-+ heap->ptrs = new_ptrs;
-+ return 0;
-+}
-+
-+static
-+int heap_set_len(struct lttng_ptr_heap *heap, size_t new_len)
-+{
-+ int ret;
-+
-+ ret = heap_grow(heap, new_len);
-+ if (ret)
-+ return ret;
-+ heap->len = new_len;
-+ return 0;
-+}
-+
-+int lttng_heap_init(struct lttng_ptr_heap *heap, size_t alloc_len,
-+ gfp_t gfpmask, int gt(void *a, void *b))
-+{
-+ heap->ptrs = NULL;
-+ heap->len = 0;
-+ heap->alloc_len = 0;
-+ heap->gt = gt;
-+ heap->gfpmask = gfpmask;
-+ /*
-+ * Minimum size allocated is 1 entry to ensure memory allocation
-+ * never fails within heap_replace_max.
-+ */
-+ return heap_grow(heap, max_t(size_t, 1, alloc_len));
-+}
-+
-+void lttng_heap_free(struct lttng_ptr_heap *heap)
-+{
-+ kfree(heap->ptrs);
-+}
-+
-+static void heapify(struct lttng_ptr_heap *heap, size_t i)
-+{
-+ void **ptrs = heap->ptrs;
-+ size_t l, r, largest;
-+
-+ for (;;) {
-+ void *tmp;
-+
-+ l = left(i);
-+ r = right(i);
-+ if (l < heap->len && heap->gt(ptrs[l], ptrs[i]))
-+ largest = l;
-+ else
-+ largest = i;
-+ if (r < heap->len && heap->gt(ptrs[r], ptrs[largest]))
-+ largest = r;
-+ if (largest == i)
-+ break;
-+ tmp = ptrs[i];
-+ ptrs[i] = ptrs[largest];
-+ ptrs[largest] = tmp;
-+ i = largest;
-+ }
-+ lttng_check_heap(heap);
-+}
-+
-+void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p)
-+{
-+ void *res;
-+
-+ if (!heap->len) {
-+ (void) heap_set_len(heap, 1);
-+ heap->ptrs[0] = p;
-+ lttng_check_heap(heap);
-+ return NULL;
-+ }
-+
-+ /* Replace the current max and heapify */
-+ res = heap->ptrs[0];
-+ heap->ptrs[0] = p;
-+ heapify(heap, 0);
-+ return res;
-+}
-+
-+int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p)
-+{
-+ void **ptrs;
-+ size_t pos;
-+ int ret;
-+
-+ ret = heap_set_len(heap, heap->len + 1);
-+ if (ret)
-+ return ret;
-+ ptrs = heap->ptrs;
-+ pos = heap->len - 1;
-+ while (pos > 0 && heap->gt(p, ptrs[parent(pos)])) {
-+ /* Move parent down until we find the right spot */
-+ ptrs[pos] = ptrs[parent(pos)];
-+ pos = parent(pos);
-+ }
-+ ptrs[pos] = p;
-+ lttng_check_heap(heap);
-+ return 0;
-+}
-+
-+void *lttng_heap_remove(struct lttng_ptr_heap *heap)
-+{
-+ switch (heap->len) {
-+ case 0:
-+ return NULL;
-+ case 1:
-+ (void) heap_set_len(heap, 0);
-+ return heap->ptrs[0];
-+ }
-+ /* Shrink, replace the current max by previous last entry and heapify */
-+ heap_set_len(heap, heap->len - 1);
-+ /* len changed. previous last entry is at heap->len */
-+ return lttng_heap_replace_max(heap, heap->ptrs[heap->len]);
-+}
-+
-+void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p)
-+{
-+ size_t pos, len = heap->len;
-+
-+ for (pos = 0; pos < len; pos++)
-+ if (heap->ptrs[pos] == p)
-+ goto found;
-+ return NULL;
-+found:
-+ if (heap->len == 1) {
-+ (void) heap_set_len(heap, 0);
-+ lttng_check_heap(heap);
-+ return heap->ptrs[0];
-+ }
-+ /* Replace p with previous last entry and heapify. */
-+ heap_set_len(heap, heap->len - 1);
-+ /* len changed. previous last entry is at heap->len */
-+ heap->ptrs[pos] = heap->ptrs[heap->len];
-+ heapify(heap, pos);
-+ return p;
-+}
-diff --git a/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.h b/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.h
-new file mode 100644
-index 0000000..ea8dbb8
---- /dev/null
-+++ b/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.h
-@@ -0,0 +1,117 @@
-+#ifndef _LTTNG_PRIO_HEAP_H
-+#define _LTTNG_PRIO_HEAP_H
-+
-+/*
-+ * lttng_prio_heap.h
-+ *
-+ * Priority heap containing pointers. Based on CLRS, chapter 6.
-+ *
-+ * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to deal
-+ * in the Software without restriction, including without limitation the rights
-+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+ * copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ */
-+
-+#include <linux/gfp.h>
-+
-+struct lttng_ptr_heap {
-+ size_t len, alloc_len;
-+ void **ptrs;
-+ int (*gt)(void *a, void *b);
-+ gfp_t gfpmask;
-+};
-+
-+#ifdef DEBUG_HEAP
-+void lttng_check_heap(const struct lttng_ptr_heap *heap);
-+#else
-+static inline
-+void lttng_check_heap(const struct lttng_ptr_heap *heap)
-+{
-+}
-+#endif
-+
-+/**
-+ * lttng_heap_maximum - return the largest element in the heap
-+ * @heap: the heap to be operated on
-+ *
-+ * Returns the largest element in the heap, without performing any modification
-+ * to the heap structure. Returns NULL if the heap is empty.
-+ */
-+static inline void *lttng_heap_maximum(const struct lttng_ptr_heap *heap)
-+{
-+ lttng_check_heap(heap);
-+ return heap->len ? heap->ptrs[0] : NULL;
-+}
-+
-+/**
-+ * lttng_heap_init - initialize the heap
-+ * @heap: the heap to initialize
-+ * @alloc_len: number of elements initially allocated
-+ * @gfp: allocation flags
-+ * @gt: function to compare the elements
-+ *
-+ * Returns -ENOMEM if out of memory.
-+ */
-+extern int lttng_heap_init(struct lttng_ptr_heap *heap,
-+ size_t alloc_len, gfp_t gfpmask,
-+ int gt(void *a, void *b));
-+
-+/**
-+ * lttng_heap_free - free the heap
-+ * @heap: the heap to free
-+ */
-+extern void lttng_heap_free(struct lttng_ptr_heap *heap);
-+
-+/**
-+ * lttng_heap_insert - insert an element into the heap
-+ * @heap: the heap to be operated on
-+ * @p: the element to add
-+ *
-+ * Insert an element into the heap.
-+ *
-+ * Returns -ENOMEM if out of memory.
-+ */
-+extern int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p);
-+
-+/**
-+ * lttng_heap_remove - remove the largest element from the heap
-+ * @heap: the heap to be operated on
-+ *
-+ * Returns the largest element in the heap. It removes this element from the
-+ * heap. Returns NULL if the heap is empty.
-+ */
-+extern void *lttng_heap_remove(struct lttng_ptr_heap *heap);
-+
-+/**
-+ * lttng_heap_cherrypick - remove a given element from the heap
-+ * @heap: the heap to be operated on
-+ * @p: the element
-+ *
-+ * Remove the given element from the heap. Return the element if present, else
-+ * return NULL. This algorithm has a complexity of O(n), which is higher than
-+ * O(log(n)) provided by the rest of this API.
-+ */
-+extern void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p);
-+
-+/**
-+ * lttng_heap_replace_max - replace the the largest element from the heap
-+ * @heap: the heap to be operated on
-+ * @p: the pointer to be inserted as topmost element replacement
-+ *
-+ * Returns the largest element in the heap. It removes this element from the
-+ * heap. The heap is rebalanced only once after the insertion. Returns NULL if
-+ * the heap is empty.
-+ *
-+ * This is the equivalent of calling heap_remove() and then heap_insert(), but
-+ * it only rebalances the heap once. It never allocates memory.
-+ */
-+extern void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p);
-+
-+#endif /* _LTTNG_PRIO_HEAP_H */
diff --git a/patches.lttng/0001-lttng-lib-ring-buffer.patch b/patches.lttng/0001-lttng-lib-ring-buffer.patch
deleted file mode 100644
index 9d27c5fd968..00000000000
--- a/patches.lttng/0001-lttng-lib-ring-buffer.patch
+++ /dev/null
@@ -1,6804 +0,0 @@
-From c844b2f5cfea185bcc5b5344ee642b3e3ee7ff03 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:09 -0500
-Subject: lttng lib: ring buffer
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/lib/ringbuffer/api.h | 25 +
- drivers/staging/lttng/lib/ringbuffer/backend.h | 250 +++
- .../lttng/lib/ringbuffer/backend_internal.h | 449 +++++
- .../staging/lttng/lib/ringbuffer/backend_types.h | 80 +
- drivers/staging/lttng/lib/ringbuffer/config.h | 298 ++++
- drivers/staging/lttng/lib/ringbuffer/frontend.h | 228 +++
- .../staging/lttng/lib/ringbuffer/frontend_api.h | 358 ++++
- .../lttng/lib/ringbuffer/frontend_internal.h | 424 +++++
- .../staging/lttng/lib/ringbuffer/frontend_types.h | 176 ++
- drivers/staging/lttng/lib/ringbuffer/iterator.h | 70 +
- drivers/staging/lttng/lib/ringbuffer/nohz.h | 30 +
- .../lttng/lib/ringbuffer/ring_buffer_backend.c | 854 ++++++++++
- .../lttng/lib/ringbuffer/ring_buffer_frontend.c | 1721 ++++++++++++++++++++
- .../lttng/lib/ringbuffer/ring_buffer_iterator.c | 798 +++++++++
- .../lttng/lib/ringbuffer/ring_buffer_mmap.c | 115 ++
- .../lttng/lib/ringbuffer/ring_buffer_splice.c | 202 +++
- .../staging/lttng/lib/ringbuffer/ring_buffer_vfs.c | 387 +++++
- drivers/staging/lttng/lib/ringbuffer/vatomic.h | 85 +
- drivers/staging/lttng/lib/ringbuffer/vfs.h | 89 +
- 19 files changed, 6639 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/api.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/backend.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/backend_internal.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/backend_types.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/config.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/frontend.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/frontend_api.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/frontend_internal.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/frontend_types.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/iterator.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/nohz.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/vatomic.h
- create mode 100644 drivers/staging/lttng/lib/ringbuffer/vfs.h
-
-diff --git a/drivers/staging/lttng/lib/ringbuffer/api.h b/drivers/staging/lttng/lib/ringbuffer/api.h
-new file mode 100644
-index 0000000..f8a1145
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/api.h
-@@ -0,0 +1,25 @@
-+#ifndef _LINUX_RING_BUFFER_API_H
-+#define _LINUX_RING_BUFFER_API_H
-+
-+/*
-+ * linux/ringbuffer/api.h
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers "mathieu.desnoyers@efficios.com"
-+ *
-+ * Ring Buffer API.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include "../../wrapper/ringbuffer/backend.h"
-+#include "../../wrapper/ringbuffer/frontend.h"
-+#include "../../wrapper/ringbuffer/vfs.h"
-+
-+/*
-+ * ring_buffer_frontend_api.h contains static inline functions that depend on
-+ * client static inlines. Hence the inclusion of this "api" header only
-+ * within the client.
-+ */
-+#include "../../wrapper/ringbuffer/frontend_api.h"
-+
-+#endif /* _LINUX_RING_BUFFER_API_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/backend.h b/drivers/staging/lttng/lib/ringbuffer/backend.h
-new file mode 100644
-index 0000000..541dc53
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/backend.h
-@@ -0,0 +1,250 @@
-+#ifndef _LINUX_RING_BUFFER_BACKEND_H
-+#define _LINUX_RING_BUFFER_BACKEND_H
-+
-+/*
-+ * linux/ringbuffer/backend.h
-+ *
-+ * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring buffer backend (API).
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ *
-+ * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
-+ * the reader in flight recorder mode.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/sched.h>
-+#include <linux/timer.h>
-+#include <linux/wait.h>
-+#include <linux/poll.h>
-+#include <linux/list.h>
-+#include <linux/fs.h>
-+#include <linux/mm.h>
-+
-+/* Internal helpers */
-+#include "../../wrapper/ringbuffer/backend_internal.h"
-+#include "../../wrapper/ringbuffer/frontend_internal.h"
-+
-+/* Ring buffer backend API */
-+
-+/* Ring buffer backend access (read/write) */
-+
-+extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
-+ size_t offset, void *dest, size_t len);
-+
-+extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
-+ size_t offset, void __user *dest,
-+ size_t len);
-+
-+extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
-+ size_t offset, void *dest, size_t len);
-+
-+extern struct page **
-+lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb, size_t offset,
-+ void ***virt);
-+
-+/*
-+ * Return the address where a given offset is located.
-+ * Should be used to get the current subbuffer header pointer. Given we know
-+ * it's never on a page boundary, it's safe to write directly to this address,
-+ * as long as the write is never bigger than a page size.
-+ */
-+extern void *
-+lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
-+ size_t offset);
-+extern void *
-+lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
-+ size_t offset);
-+
-+/**
-+ * lib_ring_buffer_write - write data to a buffer backend
-+ * @config : ring buffer instance configuration
-+ * @ctx: ring buffer context. (input arguments only)
-+ * @src : source pointer to copy from
-+ * @len : length of data to copy
-+ *
-+ * This function copies "len" bytes of data from a source pointer to a buffer
-+ * backend, at the current context offset. This is more or less a buffer
-+ * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
-+ * if copy is crossing a page boundary.
-+ */
-+static inline
-+void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx,
-+ const void *src, size_t len)
-+{
-+ struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-+ struct channel_backend *chanb = &ctx->chan->backend;
-+ size_t sbidx, index;
-+ size_t offset = ctx->buf_offset;
-+ ssize_t pagecpy;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ unsigned long sb_bindex, id;
-+
-+ offset &= chanb->buf_size - 1;
-+ sbidx = offset >> chanb->subbuf_size_order;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-+ id = bufb->buf_wsb[sbidx].id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(ctx->chan,
-+ config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ if (likely(pagecpy == len))
-+ lib_ring_buffer_do_copy(config,
-+ rpages->p[index].virt
-+ + (offset & ~PAGE_MASK),
-+ src, len);
-+ else
-+ _lib_ring_buffer_write(bufb, offset, src, len, 0);
-+ ctx->buf_offset += len;
-+}
-+
-+/**
-+ * lib_ring_buffer_memset - write len bytes of c to a buffer backend
-+ * @config : ring buffer instance configuration
-+ * @bufb : ring buffer backend
-+ * @offset : offset within the buffer
-+ * @c : the byte to copy
-+ * @len : number of bytes to copy
-+ *
-+ * This function writes "len" bytes of "c" to a buffer backend, at a specific
-+ * offset. This is more or less a buffer backend-specific memset() operation.
-+ * Calls the slow path (_ring_buffer_memset) if write is crossing a page
-+ * boundary.
-+ */
-+static inline
-+void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx, int c, size_t len)
-+{
-+
-+ struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-+ struct channel_backend *chanb = &ctx->chan->backend;
-+ size_t sbidx, index;
-+ size_t offset = ctx->buf_offset;
-+ ssize_t pagecpy;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ unsigned long sb_bindex, id;
-+
-+ offset &= chanb->buf_size - 1;
-+ sbidx = offset >> chanb->subbuf_size_order;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-+ id = bufb->buf_wsb[sbidx].id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(ctx->chan,
-+ config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ if (likely(pagecpy == len))
-+ lib_ring_buffer_do_memset(rpages->p[index].virt
-+ + (offset & ~PAGE_MASK),
-+ c, len);
-+ else
-+ _lib_ring_buffer_memset(bufb, offset, c, len, 0);
-+ ctx->buf_offset += len;
-+}
-+
-+/**
-+ * lib_ring_buffer_copy_from_user - write userspace data to a buffer backend
-+ * @config : ring buffer instance configuration
-+ * @ctx: ring buffer context. (input arguments only)
-+ * @src : userspace source pointer to copy from
-+ * @len : length of data to copy
-+ *
-+ * This function copies "len" bytes of data from a userspace pointer to a
-+ * buffer backend, at the current context offset. This is more or less a buffer
-+ * backend-specific memcpy() operation. Calls the slow path
-+ * (_ring_buffer_write_from_user) if copy is crossing a page boundary.
-+ */
-+static inline
-+void lib_ring_buffer_copy_from_user(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx,
-+ const void __user *src, size_t len)
-+{
-+ struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-+ struct channel_backend *chanb = &ctx->chan->backend;
-+ size_t sbidx, index;
-+ size_t offset = ctx->buf_offset;
-+ ssize_t pagecpy;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ unsigned long sb_bindex, id;
-+ unsigned long ret;
-+
-+ offset &= chanb->buf_size - 1;
-+ sbidx = offset >> chanb->subbuf_size_order;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-+ id = bufb->buf_wsb[sbidx].id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(ctx->chan,
-+ config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+
-+ if (unlikely(!access_ok(VERIFY_READ, src, len)))
-+ goto fill_buffer;
-+
-+ if (likely(pagecpy == len)) {
-+ ret = lib_ring_buffer_do_copy_from_user(
-+ rpages->p[index].virt + (offset & ~PAGE_MASK),
-+ src, len);
-+ if (unlikely(ret > 0)) {
-+ len -= (pagecpy - ret);
-+ offset += (pagecpy - ret);
-+ goto fill_buffer;
-+ }
-+ } else {
-+ _lib_ring_buffer_copy_from_user(bufb, offset, src, len, 0);
-+ }
-+ ctx->buf_offset += len;
-+
-+ return;
-+
-+fill_buffer:
-+ /*
-+ * In the error path we call the slow path version to avoid
-+ * the pollution of static inline code.
-+ */
-+ _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
-+}
-+
-+/*
-+ * This accessor counts the number of unread records in a buffer.
-+ * It only provides a consistent value if no reads not writes are performed
-+ * concurrently.
-+ */
-+static inline
-+unsigned long lib_ring_buffer_get_records_unread(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ struct lib_ring_buffer_backend *bufb = &buf->backend;
-+ struct lib_ring_buffer_backend_pages *pages;
-+ unsigned long records_unread = 0, sb_bindex, id;
-+ unsigned int i;
-+
-+ for (i = 0; i < bufb->chan->backend.num_subbuf; i++) {
-+ id = bufb->buf_wsb[i].id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ pages = bufb->array[sb_bindex];
-+ records_unread += v_read(config, &pages->records_unread);
-+ }
-+ if (config->mode == RING_BUFFER_OVERWRITE) {
-+ id = bufb->buf_rsb.id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ pages = bufb->array[sb_bindex];
-+ records_unread += v_read(config, &pages->records_unread);
-+ }
-+ return records_unread;
-+}
-+
-+ssize_t lib_ring_buffer_file_splice_read(struct file *in, loff_t *ppos,
-+ struct pipe_inode_info *pipe,
-+ size_t len, unsigned int flags);
-+loff_t lib_ring_buffer_no_llseek(struct file *file, loff_t offset, int origin);
-+
-+#endif /* _LINUX_RING_BUFFER_BACKEND_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/backend_internal.h b/drivers/staging/lttng/lib/ringbuffer/backend_internal.h
-new file mode 100644
-index 0000000..442f357
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/backend_internal.h
-@@ -0,0 +1,449 @@
-+#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
-+#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
-+
-+/*
-+ * linux/ringbuffer/backend_internal.h
-+ *
-+ * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring buffer backend (internal helpers).
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include "../../wrapper/ringbuffer/config.h"
-+#include "../../wrapper/ringbuffer/backend_types.h"
-+#include "../../wrapper/ringbuffer/frontend_types.h"
-+#include <linux/string.h>
-+#include <linux/uaccess.h>
-+
-+/* Ring buffer backend API presented to the frontend */
-+
-+/* Ring buffer and channel backend create/free */
-+
-+int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
-+ struct channel_backend *chan, int cpu);
-+void channel_backend_unregister_notifiers(struct channel_backend *chanb);
-+void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
-+int channel_backend_init(struct channel_backend *chanb,
-+ const char *name,
-+ const struct lib_ring_buffer_config *config,
-+ void *priv, size_t subbuf_size,
-+ size_t num_subbuf);
-+void channel_backend_free(struct channel_backend *chanb);
-+
-+void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
-+void channel_backend_reset(struct channel_backend *chanb);
-+
-+int lib_ring_buffer_backend_init(void);
-+void lib_ring_buffer_backend_exit(void);
-+
-+extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
-+ size_t offset, const void *src, size_t len,
-+ ssize_t pagecpy);
-+extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
-+ size_t offset, int c, size_t len,
-+ ssize_t pagecpy);
-+extern void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
-+ size_t offset, const void *src,
-+ size_t len, ssize_t pagecpy);
-+
-+/*
-+ * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
-+ * exchanged atomically.
-+ *
-+ * Top half word, except lowest bit, belongs to "offset", which is used to keep
-+ * to count the produced buffers. For overwrite mode, this provides the
-+ * consumer with the capacity to read subbuffers in order, handling the
-+ * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
-+ * systems) concurrently with a single execution of get_subbuf (between offset
-+ * sampling and subbuffer ID exchange).
-+ */
-+
-+#define HALF_ULONG_BITS (BITS_PER_LONG >> 1)
-+
-+#define SB_ID_OFFSET_SHIFT (HALF_ULONG_BITS + 1)
-+#define SB_ID_OFFSET_COUNT (1UL << SB_ID_OFFSET_SHIFT)
-+#define SB_ID_OFFSET_MASK (~(SB_ID_OFFSET_COUNT - 1))
-+/*
-+ * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
-+ */
-+#define SB_ID_NOREF_SHIFT (SB_ID_OFFSET_SHIFT - 1)
-+#define SB_ID_NOREF_COUNT (1UL << SB_ID_NOREF_SHIFT)
-+#define SB_ID_NOREF_MASK SB_ID_NOREF_COUNT
-+/*
-+ * In overwrite mode: lowest half of word is used for index.
-+ * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
-+ * In producer-consumer mode: whole word used for index.
-+ */
-+#define SB_ID_INDEX_SHIFT 0
-+#define SB_ID_INDEX_COUNT (1UL << SB_ID_INDEX_SHIFT)
-+#define SB_ID_INDEX_MASK (SB_ID_NOREF_COUNT - 1)
-+
-+/*
-+ * Construct the subbuffer id from offset, index and noref. Use only the index
-+ * for producer-consumer mode (offset and noref are only used in overwrite
-+ * mode).
-+ */
-+static inline
-+unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
-+ unsigned long offset, unsigned long noref,
-+ unsigned long index)
-+{
-+ if (config->mode == RING_BUFFER_OVERWRITE)
-+ return (offset << SB_ID_OFFSET_SHIFT)
-+ | (noref << SB_ID_NOREF_SHIFT)
-+ | index;
-+ else
-+ return index;
-+}
-+
-+/*
-+ * Compare offset with the offset contained within id. Return 1 if the offset
-+ * bits are identical, else 0.
-+ */
-+static inline
-+int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
-+ unsigned long id, unsigned long offset)
-+{
-+ return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
-+}
-+
-+static inline
-+unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
-+ unsigned long id)
-+{
-+ if (config->mode == RING_BUFFER_OVERWRITE)
-+ return id & SB_ID_INDEX_MASK;
-+ else
-+ return id;
-+}
-+
-+static inline
-+unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
-+ unsigned long id)
-+{
-+ if (config->mode == RING_BUFFER_OVERWRITE)
-+ return !!(id & SB_ID_NOREF_MASK);
-+ else
-+ return 1;
-+}
-+
-+/*
-+ * Only used by reader on subbuffer ID it has exclusive access to. No volatile
-+ * needed.
-+ */
-+static inline
-+void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
-+ unsigned long *id)
-+{
-+ if (config->mode == RING_BUFFER_OVERWRITE)
-+ *id |= SB_ID_NOREF_MASK;
-+}
-+
-+static inline
-+void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
-+ unsigned long *id, unsigned long offset)
-+{
-+ unsigned long tmp;
-+
-+ if (config->mode == RING_BUFFER_OVERWRITE) {
-+ tmp = *id;
-+ tmp &= ~SB_ID_OFFSET_MASK;
-+ tmp |= offset << SB_ID_OFFSET_SHIFT;
-+ tmp |= SB_ID_NOREF_MASK;
-+ /* Volatile store, read concurrently by readers. */
-+ ACCESS_ONCE(*id) = tmp;
-+ }
-+}
-+
-+/* No volatile access, since already used locally */
-+static inline
-+void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
-+ unsigned long *id)
-+{
-+ if (config->mode == RING_BUFFER_OVERWRITE)
-+ *id &= ~SB_ID_NOREF_MASK;
-+}
-+
-+/*
-+ * For overwrite mode, cap the number of subbuffers per buffer to:
-+ * 2^16 on 32-bit architectures
-+ * 2^32 on 64-bit architectures
-+ * This is required to fit in the index part of the ID. Return 0 on success,
-+ * -EPERM on failure.
-+ */
-+static inline
-+int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
-+ unsigned long num_subbuf)
-+{
-+ if (config->mode == RING_BUFFER_OVERWRITE)
-+ return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
-+ else
-+ return 0;
-+}
-+
-+static inline
-+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb,
-+ unsigned long idx)
-+{
-+ unsigned long sb_bindex;
-+
-+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
-+ v_inc(config, &bufb->array[sb_bindex]->records_commit);
-+}
-+
-+/*
-+ * Reader has exclusive subbuffer access for record consumption. No need to
-+ * perform the decrement atomically.
-+ */
-+static inline
-+void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb)
-+{
-+ unsigned long sb_bindex;
-+
-+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
-+ CHAN_WARN_ON(bufb->chan,
-+ !v_read(config, &bufb->array[sb_bindex]->records_unread));
-+ /* Non-atomic decrement protected by exclusive subbuffer access */
-+ _v_dec(config, &bufb->array[sb_bindex]->records_unread);
-+ v_inc(config, &bufb->records_read);
-+}
-+
-+static inline
-+unsigned long subbuffer_get_records_count(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb,
-+ unsigned long idx)
-+{
-+ unsigned long sb_bindex;
-+
-+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
-+ return v_read(config, &bufb->array[sb_bindex]->records_commit);
-+}
-+
-+/*
-+ * Must be executed at subbuffer delivery when the writer has _exclusive_
-+ * subbuffer access. See ring_buffer_check_deliver() for details.
-+ * ring_buffer_get_records_count() must be called to get the records count
-+ * before this function, because it resets the records_commit count.
-+ */
-+static inline
-+unsigned long subbuffer_count_records_overrun(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb,
-+ unsigned long idx)
-+{
-+ struct lib_ring_buffer_backend_pages *pages;
-+ unsigned long overruns, sb_bindex;
-+
-+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
-+ pages = bufb->array[sb_bindex];
-+ overruns = v_read(config, &pages->records_unread);
-+ v_set(config, &pages->records_unread,
-+ v_read(config, &pages->records_commit));
-+ v_set(config, &pages->records_commit, 0);
-+
-+ return overruns;
-+}
-+
-+static inline
-+void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb,
-+ unsigned long idx,
-+ unsigned long data_size)
-+{
-+ struct lib_ring_buffer_backend_pages *pages;
-+ unsigned long sb_bindex;
-+
-+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
-+ pages = bufb->array[sb_bindex];
-+ pages->data_size = data_size;
-+}
-+
-+static inline
-+unsigned long subbuffer_get_read_data_size(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb)
-+{
-+ struct lib_ring_buffer_backend_pages *pages;
-+ unsigned long sb_bindex;
-+
-+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
-+ pages = bufb->array[sb_bindex];
-+ return pages->data_size;
-+}
-+
-+static inline
-+unsigned long subbuffer_get_data_size(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb,
-+ unsigned long idx)
-+{
-+ struct lib_ring_buffer_backend_pages *pages;
-+ unsigned long sb_bindex;
-+
-+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
-+ pages = bufb->array[sb_bindex];
-+ return pages->data_size;
-+}
-+
-+/**
-+ * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
-+ * writer.
-+ */
-+static inline
-+void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb,
-+ unsigned long idx)
-+{
-+ unsigned long id, new_id;
-+
-+ if (config->mode != RING_BUFFER_OVERWRITE)
-+ return;
-+
-+ /*
-+ * Performing a volatile access to read the sb_pages, because we want to
-+ * read a coherent version of the pointer and the associated noref flag.
-+ */
-+ id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
-+ for (;;) {
-+ /* This check is called on the fast path for each record. */
-+ if (likely(!subbuffer_id_is_noref(config, id))) {
-+ /*
-+ * Store after load dependency ordering the writes to
-+ * the subbuffer after load and test of the noref flag
-+ * matches the memory barrier implied by the cmpxchg()
-+ * in update_read_sb_index().
-+ */
-+ return; /* Already writing to this buffer */
-+ }
-+ new_id = id;
-+ subbuffer_id_clear_noref(config, &new_id);
-+ new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
-+ if (likely(new_id == id))
-+ break;
-+ id = new_id;
-+ }
-+}
-+
-+/**
-+ * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
-+ * called by writer.
-+ */
-+static inline
-+void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb,
-+ unsigned long idx, unsigned long offset)
-+{
-+ if (config->mode != RING_BUFFER_OVERWRITE)
-+ return;
-+
-+ /*
-+ * Because ring_buffer_set_noref() is only called by a single thread
-+ * (the one which updated the cc_sb value), there are no concurrent
-+ * updates to take care of: other writers have not updated cc_sb, so
-+ * they cannot set the noref flag, and concurrent readers cannot modify
-+ * the pointer because the noref flag is not set yet.
-+ * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
-+ * to the subbuffer before this set noref operation.
-+ * subbuffer_set_noref() uses a volatile store to deal with concurrent
-+ * readers of the noref flag.
-+ */
-+ CHAN_WARN_ON(bufb->chan,
-+ subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
-+ /*
-+ * Memory barrier that ensures counter stores are ordered before set
-+ * noref and offset.
-+ */
-+ smp_mb();
-+ subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
-+}
-+
-+/**
-+ * update_read_sb_index - Read-side subbuffer index update.
-+ */
-+static inline
-+int update_read_sb_index(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb,
-+ struct channel_backend *chanb,
-+ unsigned long consumed_idx,
-+ unsigned long consumed_count)
-+{
-+ unsigned long old_id, new_id;
-+
-+ if (config->mode == RING_BUFFER_OVERWRITE) {
-+ /*
-+ * Exchange the target writer subbuffer with our own unused
-+ * subbuffer. No need to use ACCESS_ONCE() here to read the
-+ * old_wpage, because the value read will be confirmed by the
-+ * following cmpxchg().
-+ */
-+ old_id = bufb->buf_wsb[consumed_idx].id;
-+ if (unlikely(!subbuffer_id_is_noref(config, old_id)))
-+ return -EAGAIN;
-+ /*
-+ * Make sure the offset count we are expecting matches the one
-+ * indicated by the writer.
-+ */
-+ if (unlikely(!subbuffer_id_compare_offset(config, old_id,
-+ consumed_count)))
-+ return -EAGAIN;
-+ CHAN_WARN_ON(bufb->chan,
-+ !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
-+ subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
-+ consumed_count);
-+ new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
-+ bufb->buf_rsb.id);
-+ if (unlikely(old_id != new_id))
-+ return -EAGAIN;
-+ bufb->buf_rsb.id = new_id;
-+ } else {
-+ /* No page exchange, use the writer page directly */
-+ bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Use the architecture-specific memcpy implementation for constant-sized
-+ * inputs, but rely on an inline memcpy for length statically unknown.
-+ * The function call to memcpy is just way too expensive for a fast path.
-+ */
-+#define lib_ring_buffer_do_copy(config, dest, src, len) \
-+do { \
-+ size_t __len = (len); \
-+ if (__builtin_constant_p(len)) \
-+ memcpy(dest, src, __len); \
-+ else \
-+ inline_memcpy(dest, src, __len); \
-+} while (0)
-+
-+/*
-+ * We use __copy_from_user to copy userspace data since we already
-+ * did the access_ok for the whole range.
-+ */
-+static inline
-+unsigned long lib_ring_buffer_do_copy_from_user(void *dest,
-+ const void __user *src,
-+ unsigned long len)
-+{
-+ return __copy_from_user(dest, src, len);
-+}
-+
-+/*
-+ * write len bytes to dest with c
-+ */
-+static inline
-+void lib_ring_buffer_do_memset(char *dest, int c,
-+ unsigned long len)
-+{
-+ unsigned long i;
-+
-+ for (i = 0; i < len; i++)
-+ dest[i] = c;
-+}
-+
-+#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/backend_types.h b/drivers/staging/lttng/lib/ringbuffer/backend_types.h
-new file mode 100644
-index 0000000..1d301de
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/backend_types.h
-@@ -0,0 +1,80 @@
-+#ifndef _LINUX_RING_BUFFER_BACKEND_TYPES_H
-+#define _LINUX_RING_BUFFER_BACKEND_TYPES_H
-+
-+/*
-+ * linux/ringbuffer/backend_types.h
-+ *
-+ * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring buffer backend (types).
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/cpumask.h>
-+#include <linux/types.h>
-+
-+struct lib_ring_buffer_backend_page {
-+ void *virt; /* page virtual address (cached) */
-+ struct page *page; /* pointer to page structure */
-+};
-+
-+struct lib_ring_buffer_backend_pages {
-+ unsigned long mmap_offset; /* offset of the subbuffer in mmap */
-+ union v_atomic records_commit; /* current records committed count */
-+ union v_atomic records_unread; /* records to read */
-+ unsigned long data_size; /* Amount of data to read from subbuf */
-+ struct lib_ring_buffer_backend_page p[];
-+};
-+
-+struct lib_ring_buffer_backend_subbuffer {
-+ /* Identifier for subbuf backend pages. Exchanged atomically. */
-+ unsigned long id; /* backend subbuffer identifier */
-+};
-+
-+/*
-+ * Forward declaration of frontend-specific channel and ring_buffer.
-+ */
-+struct channel;
-+struct lib_ring_buffer;
-+
-+struct lib_ring_buffer_backend {
-+ /* Array of ring_buffer_backend_subbuffer for writer */
-+ struct lib_ring_buffer_backend_subbuffer *buf_wsb;
-+ /* ring_buffer_backend_subbuffer for reader */
-+ struct lib_ring_buffer_backend_subbuffer buf_rsb;
-+ /*
-+ * Pointer array of backend pages, for whole buffer.
-+ * Indexed by ring_buffer_backend_subbuffer identifier (id) index.
-+ */
-+ struct lib_ring_buffer_backend_pages **array;
-+ unsigned int num_pages_per_subbuf;
-+
-+ struct channel *chan; /* Associated channel */
-+ int cpu; /* This buffer's cpu. -1 if global. */
-+ union v_atomic records_read; /* Number of records read */
-+ unsigned int allocated:1; /* Bool: is buffer allocated ? */
-+};
-+
-+struct channel_backend {
-+ unsigned long buf_size; /* Size of the buffer */
-+ unsigned long subbuf_size; /* Sub-buffer size */
-+ unsigned int subbuf_size_order; /* Order of sub-buffer size */
-+ unsigned int num_subbuf_order; /*
-+ * Order of number of sub-buffers/buffer
-+ * for writer.
-+ */
-+ unsigned int buf_size_order; /* Order of buffer size */
-+ int extra_reader_sb:1; /* Bool: has extra reader subbuffer */
-+ struct lib_ring_buffer *buf; /* Channel per-cpu buffers */
-+
-+ unsigned long num_subbuf; /* Number of sub-buffers for writer */
-+ u64 start_tsc; /* Channel creation TSC value */
-+ void *priv; /* Client-specific information */
-+ struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
-+ const struct lib_ring_buffer_config *config; /* Ring buffer configuration */
-+ cpumask_var_t cpumask; /* Allocated per-cpu buffers cpumask */
-+ char name[NAME_MAX]; /* Channel name */
-+};
-+
-+#endif /* _LINUX_RING_BUFFER_BACKEND_TYPES_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/config.h b/drivers/staging/lttng/lib/ringbuffer/config.h
-new file mode 100644
-index 0000000..fd73d55
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/config.h
-@@ -0,0 +1,298 @@
-+#ifndef _LINUX_RING_BUFFER_CONFIG_H
-+#define _LINUX_RING_BUFFER_CONFIG_H
-+
-+/*
-+ * linux/ringbuffer/config.h
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring buffer configuration header. Note: after declaring the standard inline
-+ * functions, clients should also include linux/ringbuffer/api.h.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/percpu.h>
-+#include "../align.h"
-+
-+struct lib_ring_buffer;
-+struct channel;
-+struct lib_ring_buffer_config;
-+struct lib_ring_buffer_ctx;
-+
-+/*
-+ * Ring buffer client callbacks. Only used by slow path, never on fast path.
-+ * For the fast path, record_header_size(), ring_buffer_clock_read() should be
-+ * provided as inline functions too. These may simply return 0 if not used by
-+ * the client.
-+ */
-+struct lib_ring_buffer_client_cb {
-+ /* Mandatory callbacks */
-+
-+ /* A static inline version is also required for fast path */
-+ u64 (*ring_buffer_clock_read) (struct channel *chan);
-+ size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
-+ struct channel *chan, size_t offset,
-+ size_t *pre_header_padding,
-+ struct lib_ring_buffer_ctx *ctx);
-+
-+ /* Slow path only, at subbuffer switch */
-+ size_t (*subbuffer_header_size) (void);
-+ void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
-+ unsigned int subbuf_idx);
-+ void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
-+ unsigned int subbuf_idx, unsigned long data_size);
-+
-+ /* Optional callbacks (can be set to NULL) */
-+
-+ /* Called at buffer creation/finalize */
-+ int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
-+ int cpu, const char *name);
-+ /*
-+ * Clients should guarantee that no new reader handle can be opened
-+ * after finalize.
-+ */
-+ void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);
-+
-+ /*
-+ * Extract header length, payload length and timestamp from event
-+ * record. Used by buffer iterators. Timestamp is only used by channel
-+ * iterator.
-+ */
-+ void (*record_get) (const struct lib_ring_buffer_config *config,
-+ struct channel *chan, struct lib_ring_buffer *buf,
-+ size_t offset, size_t *header_len,
-+ size_t *payload_len, u64 *timestamp);
-+};
-+
-+/*
-+ * Ring buffer instance configuration.
-+ *
-+ * Declare as "static const" within the client object to ensure the inline fast
-+ * paths can be optimized.
-+ *
-+ * alloc/sync pairs:
-+ *
-+ * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
-+ * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
-+ * with preemption disabled (lib_ring_buffer_get_cpu() and
-+ * lib_ring_buffer_put_cpu()).
-+ *
-+ * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
-+ * Per-cpu buffer with global synchronization. Tracing can be performed with
-+ * preemption enabled, statistically stays on the local buffers.
-+ *
-+ * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
-+ * Should only be used for buffers belonging to a single thread or protected
-+ * by mutual exclusion by the client. Note that periodical sub-buffer switch
-+ * should be disabled in this kind of configuration.
-+ *
-+ * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
-+ * Global shared buffer with global synchronization.
-+ *
-+ * wakeup:
-+ *
-+ * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
-+ * buffers and wake up readers if data is ready. Mainly useful for tracers which
-+ * don't want to call into the wakeup code on the tracing path. Use in
-+ * combination with "read_timer_interval" channel_create() argument.
-+ *
-+ * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
-+ * ready to read. Lower latencies before the reader is woken up. Mainly suitable
-+ * for drivers.
-+ *
-+ * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
-+ * has the responsibility to perform wakeups.
-+ */
-+struct lib_ring_buffer_config {
-+ enum {
-+ RING_BUFFER_ALLOC_PER_CPU,
-+ RING_BUFFER_ALLOC_GLOBAL,
-+ } alloc;
-+ enum {
-+ RING_BUFFER_SYNC_PER_CPU, /* Wait-free */
-+ RING_BUFFER_SYNC_GLOBAL, /* Lock-free */
-+ } sync;
-+ enum {
-+ RING_BUFFER_OVERWRITE, /* Overwrite when buffer full */
-+ RING_BUFFER_DISCARD, /* Discard when buffer full */
-+ } mode;
-+ enum {
-+ RING_BUFFER_SPLICE,
-+ RING_BUFFER_MMAP,
-+ RING_BUFFER_READ, /* TODO */
-+ RING_BUFFER_ITERATOR,
-+ RING_BUFFER_NONE,
-+ } output;
-+ enum {
-+ RING_BUFFER_PAGE,
-+ RING_BUFFER_VMAP, /* TODO */
-+ RING_BUFFER_STATIC, /* TODO */
-+ } backend;
-+ enum {
-+ RING_BUFFER_NO_OOPS_CONSISTENCY,
-+ RING_BUFFER_OOPS_CONSISTENCY,
-+ } oops;
-+ enum {
-+ RING_BUFFER_IPI_BARRIER,
-+ RING_BUFFER_NO_IPI_BARRIER,
-+ } ipi;
-+ enum {
-+ RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
-+ RING_BUFFER_WAKEUP_BY_WRITER, /*
-+ * writer wakes up reader,
-+ * not lock-free
-+ * (takes spinlock).
-+ */
-+ } wakeup;
-+ /*
-+ * tsc_bits: timestamp bits saved at each record.
-+ * 0 and 64 disable the timestamp compression scheme.
-+ */
-+ unsigned int tsc_bits;
-+ struct lib_ring_buffer_client_cb cb;
-+};
-+
-+/*
-+ * ring buffer context
-+ *
-+ * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
-+ * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
-+ * lib_ring_buffer_write().
-+ */
-+struct lib_ring_buffer_ctx {
-+ /* input received by lib_ring_buffer_reserve(), saved here. */
-+ struct channel *chan; /* channel */
-+ void *priv; /* client private data */
-+ size_t data_size; /* size of payload */
-+ int largest_align; /*
-+ * alignment of the largest element
-+ * in the payload
-+ */
-+ int cpu; /* processor id */
-+
-+ /* output from lib_ring_buffer_reserve() */
-+ struct lib_ring_buffer *buf; /*
-+ * buffer corresponding to processor id
-+ * for this channel
-+ */
-+ size_t slot_size; /* size of the reserved slot */
-+ unsigned long buf_offset; /* offset following the record header */
-+ unsigned long pre_offset; /*
-+ * Initial offset position _before_
-+ * the record is written. Positioned
-+ * prior to record header alignment
-+ * padding.
-+ */
-+ u64 tsc; /* time-stamp counter value */
-+ unsigned int rflags; /* reservation flags */
-+};
-+
-+/**
-+ * lib_ring_buffer_ctx_init - initialize ring buffer context
-+ * @ctx: ring buffer context to initialize
-+ * @chan: channel
-+ * @priv: client private data
-+ * @data_size: size of record data payload
-+ * @largest_align: largest alignment within data payload types
-+ * @cpu: processor id
-+ */
-+static inline
-+void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
-+ struct channel *chan, void *priv,
-+ size_t data_size, int largest_align,
-+ int cpu)
-+{
-+ ctx->chan = chan;
-+ ctx->priv = priv;
-+ ctx->data_size = data_size;
-+ ctx->largest_align = largest_align;
-+ ctx->cpu = cpu;
-+ ctx->rflags = 0;
-+}
-+
-+/*
-+ * Reservation flags.
-+ *
-+ * RING_BUFFER_RFLAG_FULL_TSC
-+ *
-+ * This flag is passed to record_header_size() and to the primitive used to
-+ * write the record header. It indicates that the full 64-bit time value is
-+ * needed in the record header. If this flag is not set, the record header needs
-+ * only to contain "tsc_bits" bit of time value.
-+ *
-+ * Reservation flags can be added by the client, starting from
-+ * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
-+ * record_header_size() to lib_ring_buffer_write_record_header().
-+ */
-+#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
-+#define RING_BUFFER_RFLAG_END (1U << 1)
-+
-+/*
-+ * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
-+ * compile-time. We have to duplicate the "config->align" information and the
-+ * definition here because config->align is used both in the slow and fast
-+ * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
-+ */
-+#ifdef RING_BUFFER_ALIGN
-+
-+# define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
-+
-+/*
-+ * Calculate the offset needed to align the type.
-+ * size_of_type must be non-zero.
-+ */
-+static inline
-+unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
-+{
-+ return offset_align(align_drift, size_of_type);
-+}
-+
-+#else
-+
-+# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
-+
-+/*
-+ * Calculate the offset needed to align the type.
-+ * size_of_type must be non-zero.
-+ */
-+static inline
-+unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
-+{
-+ return 0;
-+}
-+
-+#endif
-+
-+/**
-+ * lib_ring_buffer_align_ctx - Align context offset on "alignment"
-+ * @ctx: ring buffer context.
-+ */
-+static inline
-+void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
-+ size_t alignment)
-+{
-+ ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
-+ alignment);
-+}
-+
-+/*
-+ * lib_ring_buffer_check_config() returns 0 on success.
-+ * Used internally to check for valid configurations at channel creation.
-+ */
-+static inline
-+int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval)
-+{
-+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
-+ && config->sync == RING_BUFFER_SYNC_PER_CPU
-+ && switch_timer_interval)
-+ return -EINVAL;
-+ return 0;
-+}
-+
-+#include "../../wrapper/ringbuffer/vatomic.h"
-+
-+#endif /* _LINUX_RING_BUFFER_CONFIG_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/frontend.h b/drivers/staging/lttng/lib/ringbuffer/frontend.h
-new file mode 100644
-index 0000000..01af77a
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/frontend.h
-@@ -0,0 +1,228 @@
-+#ifndef _LINUX_RING_BUFFER_FRONTEND_H
-+#define _LINUX_RING_BUFFER_FRONTEND_H
-+
-+/*
-+ * linux/ringbuffer/frontend.h
-+ *
-+ * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring Buffer Library Synchronization Header (API).
-+ *
-+ * Author:
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/pipe_fs_i.h>
-+#include <linux/rcupdate.h>
-+#include <linux/cpumask.h>
-+#include <linux/module.h>
-+#include <linux/bitops.h>
-+#include <linux/splice.h>
-+#include <linux/string.h>
-+#include <linux/timer.h>
-+#include <linux/sched.h>
-+#include <linux/cache.h>
-+#include <linux/time.h>
-+#include <linux/slab.h>
-+#include <linux/init.h>
-+#include <linux/stat.h>
-+#include <linux/cpu.h>
-+#include <linux/fs.h>
-+
-+#include <asm/atomic.h>
-+#include <asm/local.h>
-+
-+/* Internal helpers */
-+#include "../../wrapper/ringbuffer/frontend_internal.h"
-+
-+/* Buffer creation/removal and setup operations */
-+
-+/*
-+ * switch_timer_interval is the time interval (in us) to fill sub-buffers with
-+ * padding to let readers get those sub-buffers. Used for live streaming.
-+ *
-+ * read_timer_interval is the time interval (in us) to wake up pending readers.
-+ *
-+ * buf_addr is a pointer the the beginning of the preallocated buffer contiguous
-+ * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
-+ * be set to NULL for other backends.
-+ */
-+
-+extern
-+struct channel *channel_create(const struct lib_ring_buffer_config *config,
-+ const char *name, void *priv,
-+ void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval);
-+
-+/*
-+ * channel_destroy returns the private data pointer. It finalizes all channel's
-+ * buffers, waits for readers to release all references, and destroys the
-+ * channel.
-+ */
-+extern
-+void *channel_destroy(struct channel *chan);
-+
-+
-+/* Buffer read operations */
-+
-+/*
-+ * Iteration on channel cpumask needs to issue a read barrier to match the write
-+ * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
-+ * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
-+ * only performed at channel destruction.
-+ */
-+#define for_each_channel_cpu(cpu, chan) \
-+ for ((cpu) = -1; \
-+ ({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask); \
-+ smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)
-+
-+extern struct lib_ring_buffer *channel_get_ring_buffer(
-+ const struct lib_ring_buffer_config *config,
-+ struct channel *chan, int cpu);
-+extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
-+extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
-+
-+/*
-+ * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
-+ */
-+extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
-+ unsigned long *consumed,
-+ unsigned long *produced);
-+extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
-+ unsigned long consumed_new);
-+
-+extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
-+ unsigned long consumed);
-+extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
-+
-+/*
-+ * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
-+ * to read sub-buffers sequentially.
-+ */
-+static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
-+{
-+ int ret;
-+
-+ ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
-+ &buf->prod_snapshot);
-+ if (ret)
-+ return ret;
-+ ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
-+ return ret;
-+}
-+
-+static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
-+{
-+ lib_ring_buffer_put_subbuf(buf);
-+ lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
-+ buf->backend.chan));
-+}
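-+
-+/*
-+ * Editorial sketch (not part of the original API surface): a minimal reader
-+ * walking every per-cpu buffer of a channel and draining it sub-buffer by
-+ * sub-buffer with the helpers declared above:
-+ *
-+ *	int cpu;
-+ *	struct lib_ring_buffer *buf;
-+ *
-+ *	for_each_channel_cpu(cpu, chan) {
-+ *		buf = channel_get_ring_buffer(config, chan, cpu);
-+ *		if (lib_ring_buffer_open_read(buf))
-+ *			continue;
-+ *		while (!lib_ring_buffer_get_next_subbuf(buf)) {
-+ *			-- consume [cons_snapshot, prod_snapshot) here --
-+ *			lib_ring_buffer_put_next_subbuf(buf);
-+ *		}
-+ *		lib_ring_buffer_release_read(buf);
-+ *	}
-+ */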
-+
-+extern void channel_reset(struct channel *chan);
-+extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
-+
-+static inline
-+unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ return v_read(config, &buf->offset);
-+}
-+
-+static inline
-+unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ return atomic_long_read(&buf->consumed);
-+}
-+
-+/*
-+ * Must call lib_ring_buffer_is_finalized before reading counters (memory
-+ * ordering enforced with respect to trace teardown).
-+ */
-+static inline
-+int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ int finalized = ACCESS_ONCE(buf->finalized);
-+ /*
-+ * Read finalized before counters.
-+ */
-+ smp_rmb();
-+ return finalized;
-+}
-+
-+static inline
-+int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
-+{
-+ return chan->finalized;
-+}
-+
-+static inline
-+int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
-+{
-+ return atomic_read(&chan->record_disabled);
-+}
-+
-+static inline
-+unsigned long lib_ring_buffer_get_read_data_size(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ return subbuffer_get_read_data_size(config, &buf->backend);
-+}
-+
-+static inline
-+unsigned long lib_ring_buffer_get_records_count(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ return v_read(config, &buf->records_count);
-+}
-+
-+static inline
-+unsigned long lib_ring_buffer_get_records_overrun(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ return v_read(config, &buf->records_overrun);
-+}
-+
-+static inline
-+unsigned long lib_ring_buffer_get_records_lost_full(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ return v_read(config, &buf->records_lost_full);
-+}
-+
-+static inline
-+unsigned long lib_ring_buffer_get_records_lost_wrap(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ return v_read(config, &buf->records_lost_wrap);
-+}
-+
-+static inline
-+unsigned long lib_ring_buffer_get_records_lost_big(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ return v_read(config, &buf->records_lost_big);
-+}
-+
-+static inline
-+unsigned long lib_ring_buffer_get_records_read(
-+ const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ return v_read(config, &buf->backend.records_read);
-+}
-+
-+#endif /* _LINUX_RING_BUFFER_FRONTEND_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/frontend_api.h b/drivers/staging/lttng/lib/ringbuffer/frontend_api.h
-new file mode 100644
-index 0000000..391e593
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/frontend_api.h
-@@ -0,0 +1,358 @@
-+#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
-+#define _LINUX_RING_BUFFER_FRONTEND_API_H
-+
-+/*
-+ * linux/ringbuffer/frontend_api.h
-+ *
-+ * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring Buffer Library Synchronization Header (buffer write API).
-+ *
-+ * Author:
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
-+ * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include "../../wrapper/ringbuffer/frontend.h"
-+#include <linux/errno.h>
-+
-+/**
-+ * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
-+ *
-+ * Disables preemption (acts as an RCU read-side critical section) and keeps a
-+ * ring buffer nesting count as a supplementary safety net to ensure tracer
-+ * client code will never trigger an endless recursion. Returns the processor
-+ * ID on success, -EPERM on failure (nesting count too high).
-+ *
-+ * asm volatile and "memory" clobber prevent the compiler from moving
-+ * instructions out of the ring buffer nesting count. This is required to ensure
-+ * that probe side-effects which can cause recursion (e.g. unforeseen traps,
-+ * divisions by 0, ...) are triggered within the incremented nesting count
-+ * section.
-+ */
-+static inline
-+int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
-+{
-+ int cpu, nesting;
-+
-+ rcu_read_lock_sched_notrace();
-+ cpu = smp_processor_id();
-+ nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
-+ barrier();
-+
-+ if (unlikely(nesting > 4)) {
-+ WARN_ON_ONCE(1);
-+ per_cpu(lib_ring_buffer_nesting, cpu)--;
-+ rcu_read_unlock_sched_notrace();
-+ return -EPERM;
-+ } else
-+ return cpu;
-+}
-+
-+/**
-+ * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
-+ */
-+static inline
-+void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
-+{
-+ barrier();
-+ __get_cpu_var(lib_ring_buffer_nesting)--;
-+ rcu_read_unlock_sched_notrace();
-+}
-+
-+/*
-+ * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
-+ * part of the API per se.
-+ *
-+ * Returns 0 if the reserve succeeded, or 1 if the slow path must be taken.
-+ */
-+static inline
-+int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx,
-+ unsigned long *o_begin, unsigned long *o_end,
-+ unsigned long *o_old, size_t *before_hdr_pad)
-+{
-+ struct channel *chan = ctx->chan;
-+ struct lib_ring_buffer *buf = ctx->buf;
-+ *o_begin = v_read(config, &buf->offset);
-+ *o_old = *o_begin;
-+
-+ ctx->tsc = lib_ring_buffer_clock_read(chan);
-+ if ((int64_t) ctx->tsc == -EIO)
-+ return 1;
-+
-+ /*
-+ * Prefetch cacheline for read because we have to read the previous
-+ * commit counter to increment it and commit seq value to compare it to
-+ * the commit counter.
-+ */
-+ prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
-+
-+ if (last_tsc_overflow(config, buf, ctx->tsc))
-+ ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
-+
-+ if (unlikely(subbuf_offset(*o_begin, chan) == 0))
-+ return 1;
-+
-+ ctx->slot_size = record_header_size(config, chan, *o_begin,
-+ before_hdr_pad, ctx);
-+ ctx->slot_size +=
-+ lib_ring_buffer_align(*o_begin + ctx->slot_size,
-+ ctx->largest_align) + ctx->data_size;
-+ if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
-+ > chan->backend.subbuf_size))
-+ return 1;
-+
-+ /*
-+ * Record fits in the current buffer and we are not on a switch
-+ * boundary. It's safe to write.
-+ */
-+ *o_end = *o_begin + ctx->slot_size;
-+
-+ if (unlikely((subbuf_offset(*o_end, chan)) == 0))
-+ /*
-+ * The offset_end will fall at the very beginning of the next
-+ * subbuffer.
-+ */
-+ return 1;
-+
-+ return 0;
-+}
-+
-+/**
-+ * lib_ring_buffer_reserve - Reserve space in a ring buffer.
-+ * @config: ring buffer instance configuration.
-+ * @ctx: ring buffer context. (input and output) Must be already initialized.
-+ *
-+ * Atomic wait-free slot reservation. The reserved space starts at the context
-+ * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
-+ *
-+ * Return :
-+ * 0 on success.
-+ * -EAGAIN if channel is disabled.
-+ * -ENOSPC if event size is too large for packet.
-+ * -ENOBUFS if there is currently not enough space in buffer for the event.
-+ * -EIO if data cannot be written into the buffer for any other reason.
-+ */
-+
-+static inline
-+int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx)
-+{
-+ struct channel *chan = ctx->chan;
-+ struct lib_ring_buffer *buf;
-+ unsigned long o_begin, o_end, o_old;
-+ size_t before_hdr_pad = 0;
-+
-+ if (atomic_read(&chan->record_disabled))
-+ return -EAGAIN;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
-+ else
-+ buf = chan->backend.buf;
-+ if (atomic_read(&buf->record_disabled))
-+ return -EAGAIN;
-+ ctx->buf = buf;
-+
-+ /*
-+ * Perform retryable operations.
-+ */
-+ if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
-+ &o_end, &o_old, &before_hdr_pad)))
-+ goto slow_path;
-+
-+ if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
-+ != o_old))
-+ goto slow_path;
-+
-+ /*
-+ * Atomically update last_tsc. This update races against concurrent
-+ * atomic updates, but the race will always cause supplementary full TSC
-+ * record headers, never the opposite (missing a full TSC record header
-+ * when it would be needed).
-+ */
-+ save_last_tsc(config, ctx->buf, ctx->tsc);
-+
-+ /*
-+ * Push the reader if necessary
-+ */
-+ lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);
-+
-+ /*
-+ * Clear noref flag for this subbuffer.
-+ */
-+ lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
-+ subbuf_index(o_end - 1, chan));
-+
-+ ctx->pre_offset = o_begin;
-+ ctx->buf_offset = o_begin + before_hdr_pad;
-+ return 0;
-+slow_path:
-+ return lib_ring_buffer_reserve_slow(ctx);
-+}
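-+
-+/*
-+ * Editorial sketch of the write fast path built from the reserve/commit
-+ * helpers in this header, assuming "ctx" was initialized by the client
-+ * (chan, data_size, largest_align, ...) before reservation:
-+ *
-+ *	cpu = lib_ring_buffer_get_cpu(config);
-+ *	if (cpu < 0)
-+ *		return;
-+ *	ctx.cpu = cpu;
-+ *	if (lib_ring_buffer_reserve(config, &ctx)) {
-+ *		lib_ring_buffer_put_cpu(config);
-+ *		return;
-+ *	}
-+ *	-- write the record payload starting at ctx.buf_offset --
-+ *	lib_ring_buffer_commit(config, &ctx);
-+ *	lib_ring_buffer_put_cpu(config);
-+ */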
-+
-+/**
-+ * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
-+ * @config: ring buffer instance configuration.
-+ * @buf: buffer
-+ * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
-+ *
-+ * This operation is completely reentrant: it can be called while tracing is
-+ * active with absolutely no lock held.
-+ *
-+ * Note, however, that as a v_cmpxchg is used for some atomic operations and
-+ * needs to be executed locally for per-CPU buffers, this function must be
-+ * called from the CPU which owns the buffer for an ACTIVE flush, with
-+ * preemption disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
-+ */
-+static inline
-+void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf, enum switch_mode mode)
-+{
-+ lib_ring_buffer_switch_slow(buf, mode);
-+}
-+
-+/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
-+
-+/**
-+ * lib_ring_buffer_commit - Commit a record.
-+ * @config: ring buffer instance configuration.
-+ * @ctx: ring buffer context. (input arguments only)
-+ *
-+ * Atomic unordered slot commit. Increments the commit count in the
-+ * specified sub-buffer, and delivers it if necessary.
-+ */
-+static inline
-+void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
-+ const struct lib_ring_buffer_ctx *ctx)
-+{
-+ struct channel *chan = ctx->chan;
-+ struct lib_ring_buffer *buf = ctx->buf;
-+ unsigned long offset_end = ctx->buf_offset;
-+ unsigned long endidx = subbuf_index(offset_end - 1, chan);
-+ unsigned long commit_count;
-+
-+ /*
-+ * Must count record before incrementing the commit count.
-+ */
-+ subbuffer_count_record(config, &buf->backend, endidx);
-+
-+ /*
-+ * Order all writes to buffer before the commit count update that will
-+ * determine that the subbuffer is full.
-+ */
-+ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-+ /*
-+ * Must write slot data before incrementing commit count. This
-+ * compiler barrier is upgraded into a smp_mb() by the IPI sent
-+ * by get_subbuf().
-+ */
-+ barrier();
-+ } else
-+ smp_wmb();
-+
-+ v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);
-+
-+ /*
-+ * commit count read can race with concurrent OOO commit count updates.
-+ * This is only needed for lib_ring_buffer_check_deliver (for
-+ * non-polling delivery only) and for
-+ * lib_ring_buffer_write_commit_counter. The race can only cause the
-+ * counter to be read with the same value more than once, which could
-+	 * cause:
-+ * - Multiple delivery for the same sub-buffer (which is handled
-+ * gracefully by the reader code) if the value is for a full
-+ * sub-buffer. It's important that we can never miss a sub-buffer
-+ * delivery. Re-reading the value after the v_add ensures this.
-+	 * - Reading a commit_count with a higher value than what was actually
-+ * added to it for the lib_ring_buffer_write_commit_counter call
-+ * (again caused by a concurrent committer). It does not matter,
-+ * because this function is interested in the fact that the commit
-+ * count reaches back the reserve offset for a specific sub-buffer,
-+ * which is completely independent of the order.
-+ */
-+ commit_count = v_read(config, &buf->commit_hot[endidx].cc);
-+
-+ lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-+ commit_count, endidx);
-+ /*
-+ * Update used size at each commit. It's needed only for extracting
-+ * ring_buffer buffers from vmcore, after crash.
-+ */
-+ lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-+ ctx->buf_offset, commit_count,
-+ ctx->slot_size);
-+}
-+
-+/**
-+ * lib_ring_buffer_try_discard_reserve - Try discarding a record.
-+ * @config: ring buffer instance configuration.
-+ * @ctx: ring buffer context. (input arguments only)
-+ *
-+ * Only succeeds if no other record has been written after the record to
-+ * discard. If discard fails, the record must be committed to the buffer.
-+ *
-+ * Returns 0 upon success, -EPERM if the record cannot be discarded.
-+ */
-+static inline
-+int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
-+ const struct lib_ring_buffer_ctx *ctx)
-+{
-+ struct lib_ring_buffer *buf = ctx->buf;
-+ unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
-+
-+ /*
-+ * We need to ensure that if the cmpxchg succeeds and discards the
-+ * record, the next record will record a full TSC, because it cannot
-+ * rely on the last_tsc associated with the discarded record to detect
-+ * overflows. The only way to ensure this is to set the last_tsc to 0
-+	 * (assuming no 64-bit TSC overflow), which forces writing a 64-bit
-+ * timestamp in the next record.
-+ *
-+ * Note: if discard fails, we must leave the TSC in the record header.
-+ * It is needed to keep track of TSC overflows for the following
-+ * records.
-+ */
-+ save_last_tsc(config, buf, 0ULL);
-+
-+ if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
-+ != end_offset))
-+ return -EPERM;
-+ else
-+ return 0;
-+}
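-+
-+/*
-+ * Editorial sketch: discarding a reserved slot when a post-reservation check
-+ * (hypothetical filter_reject()) decides the record should be dropped. Per
-+ * the rule above, a failed discard still requires a commit:
-+ *
-+ *	if (filter_reject(&ctx)) {
-+ *		if (lib_ring_buffer_try_discard_reserve(config, &ctx))
-+ *			lib_ring_buffer_commit(config, &ctx);
-+ *	} else {
-+ *		-- write payload at ctx.buf_offset --
-+ *		lib_ring_buffer_commit(config, &ctx);
-+ *	}
-+ */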
-+
-+static inline
-+void channel_record_disable(const struct lib_ring_buffer_config *config,
-+ struct channel *chan)
-+{
-+ atomic_inc(&chan->record_disabled);
-+}
-+
-+static inline
-+void channel_record_enable(const struct lib_ring_buffer_config *config,
-+ struct channel *chan)
-+{
-+ atomic_dec(&chan->record_disabled);
-+}
-+
-+static inline
-+void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ atomic_inc(&buf->record_disabled);
-+}
-+
-+static inline
-+void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf)
-+{
-+ atomic_dec(&buf->record_disabled);
-+}
-+
-+#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/frontend_internal.h b/drivers/staging/lttng/lib/ringbuffer/frontend_internal.h
-new file mode 100644
-index 0000000..3bd5721
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/frontend_internal.h
-@@ -0,0 +1,424 @@
-+#ifndef _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
-+#define _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
-+
-+/*
-+ * linux/ringbuffer/frontend_internal.h
-+ *
-+ * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring Buffer Library Synchronization Header (internal helpers).
-+ *
-+ * Author:
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include "../../wrapper/ringbuffer/config.h"
-+#include "../../wrapper/ringbuffer/backend_types.h"
-+#include "../../wrapper/ringbuffer/frontend_types.h"
-+#include "../../lib/prio_heap/lttng_prio_heap.h" /* For per-CPU read-side iterator */
-+
-+/* Buffer offset macros */
-+
-+/* buf_trunc mask selects only the buffer number. */
-+static inline
-+unsigned long buf_trunc(unsigned long offset, struct channel *chan)
-+{
-+ return offset & ~(chan->backend.buf_size - 1);
-+
-+}
-+
-+/* Select the buffer number value (counter). */
-+static inline
-+unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
-+{
-+ return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
-+}
-+
-+/* buf_offset mask selects only the offset within the current buffer. */
-+static inline
-+unsigned long buf_offset(unsigned long offset, struct channel *chan)
-+{
-+ return offset & (chan->backend.buf_size - 1);
-+}
-+
-+/* subbuf_offset mask selects the offset within the current subbuffer. */
-+static inline
-+unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
-+{
-+ return offset & (chan->backend.subbuf_size - 1);
-+}
-+
-+/* subbuf_trunc mask selects the subbuffer number. */
-+static inline
-+unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
-+{
-+ return offset & ~(chan->backend.subbuf_size - 1);
-+}
-+
-+/* subbuf_align aligns the offset to the next subbuffer. */
-+static inline
-+unsigned long subbuf_align(unsigned long offset, struct channel *chan)
-+{
-+ return (offset + chan->backend.subbuf_size)
-+ & ~(chan->backend.subbuf_size - 1);
-+}
-+
-+/* subbuf_index returns the index of the current subbuffer within the buffer. */
-+static inline
-+unsigned long subbuf_index(unsigned long offset, struct channel *chan)
-+{
-+ return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
-+}
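-+
-+/*
-+ * Worked example (editorial, hypothetical geometry): with buf_size = 16384
-+ * (4 sub-buffers of subbuf_size = 4096) and offset = 9300:
-+ *
-+ *	buf_trunc()	=     0	(buffer number 0)
-+ *	buf_offset()	=  9300
-+ *	subbuf_trunc()	=  8192
-+ *	subbuf_offset()	=  1108
-+ *	subbuf_index()	=     2	(third sub-buffer)
-+ *	subbuf_align()	= 12288	(start of the next sub-buffer)
-+ */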
-+
-+/*
-+ * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
-+ * bits from the last TSC read. When overflows are detected, the full 64-bit
-+ * timestamp counter should be written in the record header. Reads and writes
-+ * last_tsc atomically.
-+ */
-+
-+#if (BITS_PER_LONG == 32)
-+static inline
-+void save_last_tsc(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf, u64 tsc)
-+{
-+ if (config->tsc_bits == 0 || config->tsc_bits == 64)
-+ return;
-+
-+ /*
-+ * Ensure the compiler performs this update in a single instruction.
-+ */
-+ v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
-+}
-+
-+static inline
-+int last_tsc_overflow(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf, u64 tsc)
-+{
-+ unsigned long tsc_shifted;
-+
-+ if (config->tsc_bits == 0 || config->tsc_bits == 64)
-+ return 0;
-+
-+ tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
-+ if (unlikely(tsc_shifted
-+ - (unsigned long)v_read(config, &buf->last_tsc)))
-+ return 1;
-+ else
-+ return 0;
-+}
-+#else
-+static inline
-+void save_last_tsc(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf, u64 tsc)
-+{
-+ if (config->tsc_bits == 0 || config->tsc_bits == 64)
-+ return;
-+
-+ v_set(config, &buf->last_tsc, (unsigned long)tsc);
-+}
-+
-+static inline
-+int last_tsc_overflow(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf, u64 tsc)
-+{
-+ if (config->tsc_bits == 0 || config->tsc_bits == 64)
-+ return 0;
-+
-+ if (unlikely((tsc - v_read(config, &buf->last_tsc))
-+ >> config->tsc_bits))
-+ return 1;
-+ else
-+ return 0;
-+}
-+#endif
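-+
-+/*
-+ * Editorial example: with a hypothetical config->tsc_bits of 27, record
-+ * headers normally carry only the low 27 timestamp bits. last_tsc_overflow()
-+ * reports an overflow as soon as the current TSC is 2^27 cycles or more away
-+ * from the last recorded one (or crosses a 2^27 boundary on 32-bit), which
-+ * makes lib_ring_buffer_try_reserve() set RING_BUFFER_RFLAG_FULL_TSC so the
-+ * full 64-bit timestamp gets written for that record.
-+ */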
-+
-+extern
-+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
-+
-+extern
-+void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
-+ enum switch_mode mode);
-+
-+/* Buffer write helpers */
-+
-+static inline
-+void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ unsigned long offset)
-+{
-+ unsigned long consumed_old, consumed_new;
-+
-+ do {
-+ consumed_old = atomic_long_read(&buf->consumed);
-+ /*
-+ * If buffer is in overwrite mode, push the reader consumed
-+ * count if the write position has reached it and we are not
-+ * at the first iteration (don't push the reader farther than
-+		 * the writer). This operation can be done concurrently by many
-+		 * writers in the same buffer; the writer at the farthest
-+		 * write position sub-buffer index in the buffer is the one
-+		 * which wins this loop.
-+ */
-+ if (unlikely(subbuf_trunc(offset, chan)
-+ - subbuf_trunc(consumed_old, chan)
-+ >= chan->backend.buf_size))
-+ consumed_new = subbuf_align(consumed_old, chan);
-+ else
-+ return;
-+ } while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
-+ consumed_new) != consumed_old));
-+}
-+
-+static inline
-+void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf,
-+ unsigned long commit_count,
-+ unsigned long idx)
-+{
-+ if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
-+ v_set(config, &buf->commit_hot[idx].seq, commit_count);
-+}
-+
-+static inline
-+int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf,
-+ struct channel *chan)
-+{
-+ unsigned long consumed_old, consumed_idx, commit_count, write_offset;
-+
-+ consumed_old = atomic_long_read(&buf->consumed);
-+ consumed_idx = subbuf_index(consumed_old, chan);
-+ commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
-+ /*
-+ * No memory barrier here, since we are only interested
-+ * in a statistically correct polling result. The next poll will
-+	 * get the data if we are racing. The mb() that ensures correct
-+ * memory order is in get_subbuf.
-+ */
-+ write_offset = v_read(config, &buf->offset);
-+
-+ /*
-+ * Check that the subbuffer we are trying to consume has been
-+ * already fully committed.
-+ */
-+
-+ if (((commit_count - chan->backend.subbuf_size)
-+ & chan->commit_count_mask)
-+ - (buf_trunc(consumed_old, chan)
-+ >> chan->backend.num_subbuf_order)
-+ != 0)
-+ return 0;
-+
-+ /*
-+ * Check that we are not about to read the same subbuffer in
-+ * which the writer head is.
-+ */
-+ if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
-+ == 0)
-+ return 0;
-+
-+ return 1;
-+
-+}
-+
-+static inline
-+int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf,
-+ struct channel *chan)
-+{
-+ return !!subbuf_offset(v_read(config, &buf->offset), chan);
-+}
-+
-+static inline
-+unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf,
-+ unsigned long idx)
-+{
-+ return subbuffer_get_data_size(config, &buf->backend, idx);
-+}
-+
-+/*
-+ * Check if all space reservations in a buffer have been committed. This helps
-+ * in knowing whether an execution context is nested (for per-cpu buffers only).
-+ * This is a very specific ftrace use-case, so we keep this as "internal" API.
-+ */
-+static inline
-+int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf,
-+ struct channel *chan)
-+{
-+ unsigned long offset, idx, commit_count;
-+
-+ CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
-+ CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
-+
-+ /*
-+ * Read offset and commit count in a loop so they are both read
-+	 * atomically wrt interrupts. We deal with interrupt concurrency by
-+ * restarting both reads if the offset has been pushed. Note that given
-+ * we only have to deal with interrupt concurrency here, an interrupt
-+ * modifying the commit count will also modify "offset", so it is safe
-+ * to only check for offset modifications.
-+ */
-+ do {
-+ offset = v_read(config, &buf->offset);
-+ idx = subbuf_index(offset, chan);
-+ commit_count = v_read(config, &buf->commit_hot[idx].cc);
-+ } while (offset != v_read(config, &buf->offset));
-+
-+ return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
-+ - (commit_count & chan->commit_count_mask) == 0);
-+}
-+
-+static inline
-+void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ unsigned long offset,
-+ unsigned long commit_count,
-+ unsigned long idx)
-+{
-+ unsigned long old_commit_count = commit_count
-+ - chan->backend.subbuf_size;
-+ u64 tsc;
-+
-+ /* Check if all commits have been done */
-+ if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
-+ - (old_commit_count & chan->commit_count_mask) == 0)) {
-+ /*
-+ * If we succeeded at updating cc_sb below, we are the subbuffer
-+		 * writer delivering the subbuffer. This deals with concurrent
-+		 * updates of the "cc" value without adding an add_return atomic
-+ * operation to the fast path.
-+ *
-+ * We are doing the delivery in two steps:
-+ * - First, we cmpxchg() cc_sb to the new value
-+ * old_commit_count + 1. This ensures that we are the only
-+ * subbuffer user successfully filling the subbuffer, but we
-+ * do _not_ set the cc_sb value to "commit_count" yet.
-+ * Therefore, other writers that would wrap around the ring
-+ * buffer and try to start writing to our subbuffer would
-+ * have to drop records, because it would appear as
-+ * non-filled.
-+ * We therefore have exclusive access to the subbuffer control
-+ * structures. This mutual exclusion with other writers is
-+ * crucially important to perform record overruns count in
-+ * flight recorder mode locklessly.
-+ * - When we are ready to release the subbuffer (either for
-+ * reading or for overrun by other writers), we simply set the
-+ * cc_sb value to "commit_count" and perform delivery.
-+ *
-+		 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
-+ * This guarantees that old_commit_count + 1 != commit_count.
-+ */
-+ if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
-+ old_commit_count, old_commit_count + 1)
-+ == old_commit_count)) {
-+ /*
-+ * Start of exclusive subbuffer access. We are
-+ * guaranteed to be the last writer in this subbuffer
-+ * and any other writer trying to access this subbuffer
-+ * in this state is required to drop records.
-+ */
-+ tsc = config->cb.ring_buffer_clock_read(chan);
-+ v_add(config,
-+ subbuffer_get_records_count(config,
-+ &buf->backend, idx),
-+ &buf->records_count);
-+ v_add(config,
-+ subbuffer_count_records_overrun(config,
-+ &buf->backend,
-+ idx),
-+ &buf->records_overrun);
-+ config->cb.buffer_end(buf, tsc, idx,
-+ lib_ring_buffer_get_data_size(config,
-+ buf,
-+ idx));
-+
-+ /*
-+ * Set noref flag and offset for this subbuffer id.
-+ * Contains a memory barrier that ensures counter stores
-+ * are ordered before set noref and offset.
-+ */
-+ lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
-+ buf_trunc_val(offset, chan));
-+
-+ /*
-+ * Order set_noref and record counter updates before the
-+ * end of subbuffer exclusive access. Orders with
-+ * respect to writers coming into the subbuffer after
-+ * wrap around, and also order wrt concurrent readers.
-+ */
-+ smp_mb();
-+ /* End of exclusive subbuffer access */
-+ v_set(config, &buf->commit_cold[idx].cc_sb,
-+ commit_count);
-+ lib_ring_buffer_vmcore_check_deliver(config, buf,
-+ commit_count, idx);
-+
-+ /*
-+ * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
-+ */
-+ if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
-+ && atomic_long_read(&buf->active_readers)
-+ && lib_ring_buffer_poll_deliver(config, buf, chan)) {
-+ wake_up_interruptible(&buf->read_wait);
-+ wake_up_interruptible(&chan->read_wait);
-+ }
-+
-+ }
-+ }
-+}
-+
-+/*
-+ * lib_ring_buffer_write_commit_counter
-+ *
-+ * For flight recording. Must be called after commit.
-+ * This function increments the subbuffer's commit_seq counter each time the
-+ * commit count catches up to the reserve offset (modulo subbuffer size). It is
-+ * useful for crash dumps.
-+ */
-+static inline
-+void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ unsigned long idx,
-+ unsigned long buf_offset,
-+ unsigned long commit_count,
-+ size_t slot_size)
-+{
-+ unsigned long offset, commit_seq_old;
-+
-+ if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
-+ return;
-+
-+ offset = buf_offset + slot_size;
-+
-+ /*
-+ * subbuf_offset includes commit_count_mask. We can simply
-+ * compare the offsets within the subbuffer without caring about
-+ * buffer full/empty mismatch because offset is never zero here
-+ * (subbuffer header and record headers have non-zero length).
-+ */
-+ if (unlikely(subbuf_offset(offset - commit_count, chan)))
-+ return;
-+
-+ commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
-+ while ((long) (commit_seq_old - commit_count) < 0)
-+ commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
-+ commit_seq_old, commit_count);
-+}
-+
-+extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
-+ struct channel_backend *chanb, int cpu);
-+extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
-+
-+/* Keep track of trap nesting inside ring buffer code */
-+DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
-+
-+#endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/frontend_types.h b/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
-new file mode 100644
-index 0000000..5c7437f
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
-@@ -0,0 +1,176 @@
-+#ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
-+#define _LINUX_RING_BUFFER_FRONTEND_TYPES_H
-+
-+/*
-+ * linux/ringbuffer/frontend_types.h
-+ *
-+ * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring Buffer Library Synchronization Header (types).
-+ *
-+ * Author:
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/kref.h>
-+#include "../../wrapper/ringbuffer/config.h"
-+#include "../../wrapper/ringbuffer/backend_types.h"
-+#include "../../wrapper/spinlock.h"
-+#include "../../lib/prio_heap/lttng_prio_heap.h" /* For per-CPU read-side iterator */
-+
-+/*
-+ * A switch is done during tracing or as a final flush after tracing (so it
-+ * won't write in the new sub-buffer).
-+ */
-+enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
-+
-+/* channel-level read-side iterator */
-+struct channel_iter {
-+ /* Prio heap of buffers. Lowest timestamps at the top. */
-+ struct lttng_ptr_heap heap; /* Heap of struct lib_ring_buffer ptrs */
-+ struct list_head empty_head; /* Empty buffers linked-list head */
-+ int read_open; /* Opened for reading ? */
-+ u64 last_qs; /* Last quiescent state timestamp */
-+ u64 last_timestamp; /* Last timestamp (for WARN_ON) */
-+ int last_cpu; /* Last timestamp cpu */
-+ /*
-+ * read() file operation state.
-+ */
-+ unsigned long len_left;
-+};
-+
-+/* channel: collection of per-cpu ring buffers. */
-+struct channel {
-+ atomic_t record_disabled;
-+ unsigned long commit_count_mask; /*
-+ * Commit count mask, removing
-+ * the MSBs corresponding to
-+ * bits used to represent the
-+ * subbuffer index.
-+ */
-+
-+ struct channel_backend backend; /* Associated backend */
-+
-+ unsigned long switch_timer_interval; /* Buffer flush (jiffies) */
-+ unsigned long read_timer_interval; /* Reader wakeup (jiffies) */
-+ struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
-+ struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
-+ struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */
-+ int cpu_hp_enable:1; /* Enable CPU hotplug notif. */
-+ int hp_iter_enable:1; /* Enable hp iter notif. */
-+ wait_queue_head_t read_wait; /* reader wait queue */
-+ wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
-+ int finalized; /* Has channel been finalized */
-+ struct channel_iter iter; /* Channel read-side iterator */
-+ struct kref ref; /* Reference count */
-+};
-+
-+/* Per-subbuffer commit counters used on the hot path */
-+struct commit_counters_hot {
-+ union v_atomic cc; /* Commit counter */
-+ union v_atomic seq; /* Consecutive commits */
-+};
-+
-+/* Per-subbuffer commit counters used only on cold paths */
-+struct commit_counters_cold {
-+ union v_atomic cc_sb; /* Incremented _once_ at sb switch */
-+};
-+
-+/* Per-buffer read iterator */
-+struct lib_ring_buffer_iter {
-+ u64 timestamp; /* Current record timestamp */
-+ size_t header_len; /* Current record header length */
-+ size_t payload_len; /* Current record payload length */
-+
-+ struct list_head empty_node; /* Linked list of empty buffers */
-+ unsigned long consumed, read_offset, data_size;
-+ enum {
-+ ITER_GET_SUBBUF = 0,
-+ ITER_TEST_RECORD,
-+ ITER_NEXT_RECORD,
-+ ITER_PUT_SUBBUF,
-+ } state;
-+ int allocated:1;
-+ int read_open:1; /* Opened for reading ? */
-+};
-+
-+/* ring buffer state */
-+struct lib_ring_buffer {
-+ /* First 32 bytes cache-hot cacheline */
-+ union v_atomic offset; /* Current offset in the buffer */
-+ struct commit_counters_hot *commit_hot;
-+ /* Commit count per sub-buffer */
-+ atomic_long_t consumed; /*
-+ * Current offset in the buffer
-+ * standard atomic access (shared)
-+ */
-+ atomic_t record_disabled;
-+ /* End of first 32 bytes cacheline */
-+ union v_atomic last_tsc; /*
-+ * Last timestamp written in the buffer.
-+ */
-+
-+ struct lib_ring_buffer_backend backend; /* Associated backend */
-+
-+ struct commit_counters_cold *commit_cold;
-+ /* Commit count per sub-buffer */
-+ atomic_long_t active_readers; /*
-+ * Active readers count
-+ * standard atomic access (shared)
-+ */
-+ /* Dropped records */
-+ union v_atomic records_lost_full; /* Buffer full */
-+ union v_atomic records_lost_wrap; /* Nested wrap-around */
-+ union v_atomic records_lost_big; /* Events too big */
-+ union v_atomic records_count; /* Number of records written */
-+ union v_atomic records_overrun; /* Number of overwritten records */
-+ wait_queue_head_t read_wait; /* reader buffer-level wait queue */
-+ wait_queue_head_t write_wait; /* writer buffer-level wait queue (for metadata only) */
-+ int finalized; /* buffer has been finalized */
-+ struct timer_list switch_timer; /* timer for periodical switch */
-+ struct timer_list read_timer; /* timer for read poll */
-+ raw_spinlock_t raw_tick_nohz_spinlock; /* nohz entry lock/trylock */
-+ struct lib_ring_buffer_iter iter; /* read-side iterator */
-+ unsigned long get_subbuf_consumed; /* Read-side consumed */
-+ unsigned long prod_snapshot; /* Producer count snapshot */
-+ unsigned long cons_snapshot; /* Consumer count snapshot */
-+ int get_subbuf:1; /* Sub-buffer being held by reader */
-+ int switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
-+ int read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
-+};
-+
-+static inline
-+void *channel_get_private(struct channel *chan)
-+{
-+ return chan->backend.priv;
-+}
-+
-+/*
-+ * Issue warnings and disable channels upon internal error.
-+ * Can receive struct channel or struct channel_backend
-+ * parameters.
-+ */
-+#define CHAN_WARN_ON(c, cond) \
-+ ({ \
-+ struct channel *__chan; \
-+ int _____ret = unlikely(cond); \
-+ if (_____ret) { \
-+ if (__same_type(*(c), struct channel_backend)) \
-+ __chan = container_of((void *) (c), \
-+ struct channel, \
-+ backend); \
-+ else if (__same_type(*(c), struct channel)) \
-+ __chan = (void *) (c); \
-+ else \
-+ BUG_ON(1); \
-+ atomic_inc(&__chan->record_disabled); \
-+ WARN_ON(1); \
-+ } \
-+ _____ret; \
-+ })
-+
-+#endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/iterator.h b/drivers/staging/lttng/lib/ringbuffer/iterator.h
-new file mode 100644
-index 0000000..f2bd50d
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/iterator.h
-@@ -0,0 +1,70 @@
-+#ifndef _LINUX_RING_BUFFER_ITERATOR_H
-+#define _LINUX_RING_BUFFER_ITERATOR_H
-+
-+/*
-+ * linux/ringbuffer/iterator.h
-+ *
-+ * (C) Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring buffer and channel iterators.
-+ *
-+ * Author:
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include "../../wrapper/ringbuffer/backend.h"
-+#include "../../wrapper/ringbuffer/frontend.h"
-+
-+/*
-+ * lib_ring_buffer_get_next_record advances the buffer read position to the next
-+ * record. It returns either the size of the next record, -EAGAIN if there is
-+ * currently no data available, or -ENODATA if no data is available and buffer
-+ * is finalized.
-+ */
-+extern ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
-+ struct lib_ring_buffer *buf);
-+
-+/*
-+ * channel_get_next_record advances the buffer read position to the next record.
-+ * It returns either the size of the next record, -EAGAIN if there is currently
-+ * no data available, or -ENODATA if no data is available and buffer is
-+ * finalized.
-+ * Returns the current buffer in ret_buf.
-+ */
-+extern ssize_t channel_get_next_record(struct channel *chan,
-+ struct lib_ring_buffer **ret_buf);
-+
-+/**
-+ * read_current_record - copy the buffer current record into dest.
-+ * @buf: ring buffer
-+ * @dest: destination where the record should be copied
-+ *
-+ * dest should be large enough to contain the record. Returns the number of
-+ * bytes copied.
-+ */
-+static inline size_t read_current_record(struct lib_ring_buffer *buf, void *dest)
-+{
-+ return lib_ring_buffer_read(&buf->backend, buf->iter.read_offset,
-+ dest, buf->iter.payload_len);
-+}
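-+
-+/*
-+ * Editorial sketch: draining the records currently available in one buffer,
-+ * assuming the iterator was opened with lib_ring_buffer_iterator_open() and
-+ * "dest" is large enough for the largest expected record:
-+ *
-+ *	ssize_t len;
-+ *
-+ *	while ((len = lib_ring_buffer_get_next_record(chan, buf)) >= 0) {
-+ *		read_current_record(buf, dest);
-+ *		-- handle "len" bytes in dest --
-+ *	}
-+ *	-- -EAGAIN: no data for now; -ENODATA: buffer finalized and empty --
-+ */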
-+
-+extern int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf);
-+extern void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf);
-+extern int channel_iterator_open(struct channel *chan);
-+extern void channel_iterator_release(struct channel *chan);
-+
-+extern const struct file_operations channel_payload_file_operations;
-+extern const struct file_operations lib_ring_buffer_payload_file_operations;
-+
-+/*
-+ * Used internally.
-+ */
-+int channel_iterator_init(struct channel *chan);
-+void channel_iterator_unregister_notifiers(struct channel *chan);
-+void channel_iterator_free(struct channel *chan);
-+void channel_iterator_reset(struct channel *chan);
-+void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf);
-+
-+#endif /* _LINUX_RING_BUFFER_ITERATOR_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/nohz.h b/drivers/staging/lttng/lib/ringbuffer/nohz.h
-new file mode 100644
-index 0000000..3c31072
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/nohz.h
-@@ -0,0 +1,30 @@
-+#ifndef _LINUX_RING_BUFFER_NOHZ_H
-+#define _LINUX_RING_BUFFER_NOHZ_H
-+
-+/*
-+ * ringbuffer/nohz.h
-+ *
-+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#ifdef CONFIG_LIB_RING_BUFFER
-+void lib_ring_buffer_tick_nohz_flush(void);
-+void lib_ring_buffer_tick_nohz_stop(void);
-+void lib_ring_buffer_tick_nohz_restart(void);
-+#else
-+static inline void lib_ring_buffer_tick_nohz_flush(void)
-+{
-+}
-+
-+static inline void lib_ring_buffer_tick_nohz_stop(void)
-+{
-+}
-+
-+static inline void lib_ring_buffer_tick_nohz_restart(void)
-+{
-+}
-+#endif
-+
-+#endif /* _LINUX_RING_BUFFER_NOHZ_H */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c
-new file mode 100644
-index 0000000..d1b5b8c
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c
-@@ -0,0 +1,854 @@
-+/*
-+ * ring_buffer_backend.c
-+ *
-+ * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/stddef.h>
-+#include <linux/module.h>
-+#include <linux/string.h>
-+#include <linux/bitops.h>
-+#include <linux/delay.h>
-+#include <linux/errno.h>
-+#include <linux/slab.h>
-+#include <linux/cpu.h>
-+#include <linux/mm.h>
-+
-+#include "../../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "../../wrapper/ringbuffer/config.h"
-+#include "../../wrapper/ringbuffer/backend.h"
-+#include "../../wrapper/ringbuffer/frontend.h"
-+
-+/**
-+ * lib_ring_buffer_backend_allocate - allocate a channel buffer
-+ * @config: ring buffer instance configuration
-+ * @bufb: the buffer backend struct
-+ * @size: total size of the buffer
-+ * @num_subbuf: number of subbuffers
-+ * @extra_reader_sb: need extra subbuffer for reader
-+ */
-+static
-+int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_backend *bufb,
-+ size_t size, size_t num_subbuf,
-+ int extra_reader_sb)
-+{
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
-+ unsigned long subbuf_size, mmap_offset = 0;
-+ unsigned long num_subbuf_alloc;
-+ struct page **pages;
-+ void **virt;
-+ unsigned long i;
-+
-+ num_pages = size >> PAGE_SHIFT;
-+ num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
-+ subbuf_size = chanb->subbuf_size;
-+ num_subbuf_alloc = num_subbuf;
-+
-+ if (extra_reader_sb) {
-+ num_pages += num_pages_per_subbuf; /* Add pages for reader */
-+ num_subbuf_alloc++;
-+ }
-+
-+ pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
-+ 1 << INTERNODE_CACHE_SHIFT),
-+ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
-+ if (unlikely(!pages))
-+ goto pages_error;
-+
-+ virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
-+ 1 << INTERNODE_CACHE_SHIFT),
-+ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
-+ if (unlikely(!virt))
-+ goto virt_error;
-+
-+ bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
-+ * num_subbuf_alloc,
-+ 1 << INTERNODE_CACHE_SHIFT),
-+ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
-+ if (unlikely(!bufb->array))
-+ goto array_error;
-+
-+ for (i = 0; i < num_pages; i++) {
-+ pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
-+ GFP_KERNEL | __GFP_ZERO, 0);
-+ if (unlikely(!pages[i]))
-+ goto depopulate;
-+ virt[i] = page_address(pages[i]);
-+ }
-+ bufb->num_pages_per_subbuf = num_pages_per_subbuf;
-+
-+ /* Allocate backend pages array elements */
-+ for (i = 0; i < num_subbuf_alloc; i++) {
-+ bufb->array[i] =
-+ kzalloc_node(ALIGN(
-+ sizeof(struct lib_ring_buffer_backend_pages) +
-+ sizeof(struct lib_ring_buffer_backend_page)
-+ * num_pages_per_subbuf,
-+ 1 << INTERNODE_CACHE_SHIFT),
-+ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
-+ if (!bufb->array[i])
-+ goto free_array;
-+ }
-+
-+ /* Allocate write-side subbuffer table */
-+ bufb->buf_wsb = kzalloc_node(ALIGN(
-+ sizeof(struct lib_ring_buffer_backend_subbuffer)
-+ * num_subbuf,
-+ 1 << INTERNODE_CACHE_SHIFT),
-+ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
-+ if (unlikely(!bufb->buf_wsb))
-+ goto free_array;
-+
-+ for (i = 0; i < num_subbuf; i++)
-+ bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
-+
-+ /* Assign read-side subbuffer table */
-+ if (extra_reader_sb)
-+ bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
-+ num_subbuf_alloc - 1);
-+ else
-+ bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
-+
-+ /* Assign pages to page index */
-+ for (i = 0; i < num_subbuf_alloc; i++) {
-+ for (j = 0; j < num_pages_per_subbuf; j++) {
-+ CHAN_WARN_ON(chanb, page_idx > num_pages);
-+ bufb->array[i]->p[j].virt = virt[page_idx];
-+ bufb->array[i]->p[j].page = pages[page_idx];
-+ page_idx++;
-+ }
-+ if (config->output == RING_BUFFER_MMAP) {
-+ bufb->array[i]->mmap_offset = mmap_offset;
-+ mmap_offset += subbuf_size;
-+ }
-+ }
-+
-+ /*
-+ * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
-+ * will not fault.
-+ */
-+ wrapper_vmalloc_sync_all();
-+ kfree(virt);
-+ kfree(pages);
-+ return 0;
-+
-+free_array:
-+ for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
-+ kfree(bufb->array[i]);
-+depopulate:
-+ /* Free all allocated pages */
-+ for (i = 0; (i < num_pages && pages[i]); i++)
-+ __free_page(pages[i]);
-+ kfree(bufb->array);
-+array_error:
-+ kfree(virt);
-+virt_error:
-+ kfree(pages);
-+pages_error:
-+ return -ENOMEM;
-+}
-+
-+int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
-+ struct channel_backend *chanb, int cpu)
-+{
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+
-+ bufb->chan = container_of(chanb, struct channel, backend);
-+ bufb->cpu = cpu;
-+
-+ return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
-+ chanb->num_subbuf,
-+ chanb->extra_reader_sb);
-+}
-+
-+void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
-+{
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ unsigned long i, j, num_subbuf_alloc;
-+
-+ num_subbuf_alloc = chanb->num_subbuf;
-+ if (chanb->extra_reader_sb)
-+ num_subbuf_alloc++;
-+
-+ kfree(bufb->buf_wsb);
-+ for (i = 0; i < num_subbuf_alloc; i++) {
-+ for (j = 0; j < bufb->num_pages_per_subbuf; j++)
-+ __free_page(bufb->array[i]->p[j].page);
-+ kfree(bufb->array[i]);
-+ }
-+ kfree(bufb->array);
-+ bufb->allocated = 0;
-+}
-+
-+void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
-+{
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ unsigned long num_subbuf_alloc;
-+ unsigned int i;
-+
-+ num_subbuf_alloc = chanb->num_subbuf;
-+ if (chanb->extra_reader_sb)
-+ num_subbuf_alloc++;
-+
-+ for (i = 0; i < chanb->num_subbuf; i++)
-+ bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
-+ if (chanb->extra_reader_sb)
-+ bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
-+ num_subbuf_alloc - 1);
-+ else
-+ bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
-+
-+ for (i = 0; i < num_subbuf_alloc; i++) {
-+ /* Don't reset mmap_offset */
-+ v_set(config, &bufb->array[i]->records_commit, 0);
-+ v_set(config, &bufb->array[i]->records_unread, 0);
-+ bufb->array[i]->data_size = 0;
-+ /* Don't reset backend page and virt addresses */
-+ }
-+ /* Don't reset num_pages_per_subbuf, cpu, allocated */
-+ v_set(config, &bufb->records_read, 0);
-+}
-+
-+/*
-+ * The frontend is responsible for also calling ring_buffer_backend_reset for
-+ * each buffer when calling channel_backend_reset.
-+ */
-+void channel_backend_reset(struct channel_backend *chanb)
-+{
-+ struct channel *chan = container_of(chanb, struct channel, backend);
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+
-+ /*
-+ * Don't reset buf_size, subbuf_size, subbuf_size_order,
-+ * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
-+ * priv, notifiers, config, cpumask and name.
-+ */
-+ chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+/**
-+ * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
-+ * @nb: notifier block
-+ * @action: hotplug action to take
-+ * @hcpu: CPU number
-+ *
-+ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
-+ */
-+static
-+int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
-+ unsigned long action,
-+ void *hcpu)
-+{
-+ unsigned int cpu = (unsigned long)hcpu;
-+ struct channel_backend *chanb = container_of(nb, struct channel_backend,
-+ cpu_hp_notifier);
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ struct lib_ring_buffer *buf;
-+ int ret;
-+
-+ CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-+
-+ switch (action) {
-+ case CPU_UP_PREPARE:
-+ case CPU_UP_PREPARE_FROZEN:
-+ buf = per_cpu_ptr(chanb->buf, cpu);
-+ ret = lib_ring_buffer_create(buf, chanb, cpu);
-+ if (ret) {
-+ printk(KERN_ERR
-+ "ring_buffer_cpu_hp_callback: cpu %d "
-+ "buffer creation failed\n", cpu);
-+ return NOTIFY_BAD;
-+ }
-+ break;
-+ case CPU_DEAD:
-+ case CPU_DEAD_FROZEN:
-+		/* No need to do a buffer switch here, because it will happen
-+		 * when tracing is stopped, or will be done by the switch timer
-+		 * CPU_DEAD callback. */
-+ break;
-+ }
-+ return NOTIFY_OK;
-+}
-+#endif
-+
-+/**
-+ * channel_backend_init - initialize a channel backend
-+ * @chanb: channel backend
-+ * @name: channel name
-+ * @config: client ring buffer configuration
-+ * @priv: client private data
-+ * @parent: dentry of parent directory, %NULL for root directory
-+ * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
-+ * @num_subbuf: number of sub-buffers (power of 2)
-+ *
-+ * Returns 0 if successful, a negative error code otherwise.
-+ *
-+ * Creates per-cpu channel buffers using the sizes and attributes
-+ * specified. The created channel buffer files will be named
-+ * name_0...name_N-1. File permissions will be %S_IRUSR.
-+ *
-+ * Called with CPU hotplug disabled.
-+ */
-+int channel_backend_init(struct channel_backend *chanb,
-+ const char *name,
-+ const struct lib_ring_buffer_config *config,
-+ void *priv, size_t subbuf_size, size_t num_subbuf)
-+{
-+ struct channel *chan = container_of(chanb, struct channel, backend);
-+ unsigned int i;
-+ int ret;
-+
-+ if (!name)
-+ return -EPERM;
-+
-+ if (!(subbuf_size && num_subbuf))
-+ return -EPERM;
-+
-+	/* Check that the subbuffer size is at least one page. */
-+ if (subbuf_size < PAGE_SIZE)
-+ return -EINVAL;
-+
-+ /*
-+ * Make sure the number of subbuffers and subbuffer size are power of 2.
-+ */
-+ CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
-+ CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);
-+
-+ ret = subbuffer_id_check_index(config, num_subbuf);
-+ if (ret)
-+ return ret;
-+
-+ chanb->priv = priv;
-+ chanb->buf_size = num_subbuf * subbuf_size;
-+ chanb->subbuf_size = subbuf_size;
-+ chanb->buf_size_order = get_count_order(chanb->buf_size);
-+ chanb->subbuf_size_order = get_count_order(subbuf_size);
-+ chanb->num_subbuf_order = get_count_order(num_subbuf);
-+ chanb->extra_reader_sb =
-+ (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
-+ chanb->num_subbuf = num_subbuf;
-+ strlcpy(chanb->name, name, NAME_MAX);
-+ chanb->config = config;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
-+ return -ENOMEM;
-+ }
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ /* Allocating the buffer per-cpu structures */
-+ chanb->buf = alloc_percpu(struct lib_ring_buffer);
-+ if (!chanb->buf)
-+ goto free_cpumask;
-+
-+ /*
-+		 * Without CPU hotplug support, if the ring buffer is allocated
-+		 * in an early initcall, it will not be notified of secondary
-+		 * cpus. In that case, we need to allocate for all possible cpus.
-+ */
-+#ifdef CONFIG_HOTPLUG_CPU
-+ /*
-+ * buf->backend.allocated test takes care of concurrent CPU
-+ * hotplug.
-+ * Priority higher than frontend, so we create the ring buffer
-+ * before we start the timer.
-+ */
-+ chanb->cpu_hp_notifier.notifier_call =
-+ lib_ring_buffer_cpu_hp_callback;
-+ chanb->cpu_hp_notifier.priority = 5;
-+ register_hotcpu_notifier(&chanb->cpu_hp_notifier);
-+
-+ get_online_cpus();
-+ for_each_online_cpu(i) {
-+ ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
-+ chanb, i);
-+ if (ret)
-+ goto free_bufs; /* cpu hotplug locked */
-+ }
-+ put_online_cpus();
-+#else
-+ for_each_possible_cpu(i) {
-+ ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
-+ chanb, i);
-+ if (ret)
-+ goto free_bufs; /* cpu hotplug locked */
-+ }
-+#endif
-+ } else {
-+ chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
-+ if (!chanb->buf)
-+ goto free_cpumask;
-+ ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
-+ if (ret)
-+ goto free_bufs;
-+ }
-+ chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
-+
-+ return 0;
-+
-+free_bufs:
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ for_each_possible_cpu(i) {
-+ struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
-+
-+ if (!buf->backend.allocated)
-+ continue;
-+ lib_ring_buffer_free(buf);
-+ }
-+#ifdef CONFIG_HOTPLUG_CPU
-+ put_online_cpus();
-+#endif
-+ free_percpu(chanb->buf);
-+ } else
-+ kfree(chanb->buf);
-+free_cpumask:
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ free_cpumask_var(chanb->cpumask);
-+ return -ENOMEM;
-+}
-+
-+/**
-+ * channel_backend_unregister_notifiers - unregister notifiers
-+ * @chanb: the channel backend
-+ *
-+ * Holds CPU hotplug.
-+ */
-+void channel_backend_unregister_notifiers(struct channel_backend *chanb)
-+{
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-+}
-+
-+/**
-+ * channel_backend_free - destroy the channel
-+ * @chanb: the channel backend
-+ *
-+ * Destroys all channel buffers and frees the channel.
-+ */
-+void channel_backend_free(struct channel_backend *chanb)
-+{
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ unsigned int i;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ for_each_possible_cpu(i) {
-+ struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
-+
-+ if (!buf->backend.allocated)
-+ continue;
-+ lib_ring_buffer_free(buf);
-+ }
-+ free_cpumask_var(chanb->cpumask);
-+ free_percpu(chanb->buf);
-+ } else {
-+ struct lib_ring_buffer *buf = chanb->buf;
-+
-+ CHAN_WARN_ON(chanb, !buf->backend.allocated);
-+ lib_ring_buffer_free(buf);
-+ kfree(buf);
-+ }
-+}
-+
-+/**
-+ * lib_ring_buffer_write - write data to a ring_buffer buffer.
-+ * @bufb : buffer backend
-+ * @offset : offset within the buffer
-+ * @src : source address
-+ * @len : length to write
-+ * @pagecpy : page size copied so far
-+ */
-+void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
-+ const void *src, size_t len, ssize_t pagecpy)
-+{
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ size_t sbidx, index;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ unsigned long sb_bindex, id;
-+
-+ do {
-+ len -= pagecpy;
-+ src += pagecpy;
-+ offset += pagecpy;
-+ sbidx = offset >> chanb->subbuf_size_order;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+
-+ /*
-+ * Underlying layer should never ask for writes across
-+ * subbuffers.
-+ */
-+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-+
-+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-+ id = bufb->buf_wsb[sbidx].id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ lib_ring_buffer_do_copy(config,
-+ rpages->p[index].virt
-+ + (offset & ~PAGE_MASK),
-+ src, pagecpy);
-+ } while (unlikely(len != pagecpy));
-+}
-+EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
-+
-+
-+/**
-+ * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
-+ * @bufb : buffer backend
-+ * @offset : offset within the buffer
-+ * @c : the byte to write
-+ * @len : length to write
-+ * @pagecpy : page size copied so far
-+ */
-+void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
-+ size_t offset,
-+ int c, size_t len, ssize_t pagecpy)
-+{
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ size_t sbidx, index;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ unsigned long sb_bindex, id;
-+
-+ do {
-+ len -= pagecpy;
-+ offset += pagecpy;
-+ sbidx = offset >> chanb->subbuf_size_order;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+
-+ /*
-+ * Underlying layer should never ask for writes across
-+ * subbuffers.
-+ */
-+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-+
-+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-+ id = bufb->buf_wsb[sbidx].id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ lib_ring_buffer_do_memset(rpages->p[index].virt
-+ + (offset & ~PAGE_MASK),
-+ c, pagecpy);
-+ } while (unlikely(len != pagecpy));
-+}
-+EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
-+
-+
-+/**
-+ * lib_ring_buffer_copy_from_user - write user data to a ring_buffer buffer.
-+ * @bufb : buffer backend
-+ * @offset : offset within the buffer
-+ * @src : source address
-+ * @len : length to write
-+ * @pagecpy : page size copied so far
-+ *
-+ * This function deals with userspace pointers; it should never be called
-+ * directly without the src pointer having been checked with access_ok()
-+ * beforehand.
-+ */
-+void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
-+ size_t offset,
-+ const void __user *src, size_t len,
-+ ssize_t pagecpy)
-+{
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ size_t sbidx, index;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ unsigned long sb_bindex, id;
-+ int ret;
-+
-+ do {
-+ len -= pagecpy;
-+ src += pagecpy;
-+ offset += pagecpy;
-+ sbidx = offset >> chanb->subbuf_size_order;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+
-+ /*
-+ * Underlying layer should never ask for writes across
-+ * subbuffers.
-+ */
-+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-+
-+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-+ id = bufb->buf_wsb[sbidx].id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ ret = lib_ring_buffer_do_copy_from_user(rpages->p[index].virt
-+ + (offset & ~PAGE_MASK),
-+ src, pagecpy) != 0;
-+ if (ret > 0) {
-+ offset += (pagecpy - ret);
-+ len -= (pagecpy - ret);
-+ _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
-+ break; /* stop copy */
-+ }
-+ } while (unlikely(len != pagecpy));
-+}
-+EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user);
-+
-+/**
-+ * lib_ring_buffer_read - read data from a ring_buffer buffer.
-+ * @bufb : buffer backend
-+ * @offset : offset within the buffer
-+ * @dest : destination address
-+ * @len : length to copy to destination
-+ *
-+ * Should be protected by get_subbuf/put_subbuf.
-+ * Returns the length copied.
-+ */
-+size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
-+ void *dest, size_t len)
-+{
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ size_t index;
-+ ssize_t pagecpy, orig_len;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ unsigned long sb_bindex, id;
-+
-+ orig_len = len;
-+ offset &= chanb->buf_size - 1;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ if (unlikely(!len))
-+ return 0;
-+ for (;;) {
-+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-+ id = bufb->buf_rsb.id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
-+ pagecpy);
-+ len -= pagecpy;
-+ if (likely(!len))
-+ break;
-+ dest += pagecpy;
-+ offset += pagecpy;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ /*
-+ * Underlying layer should never ask for reads across
-+ * subbuffers.
-+ */
-+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-+ }
-+ return orig_len;
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
-+
-+/**
-+ * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
-+ * @bufb : buffer backend
-+ * @offset : offset within the buffer
-+ * @dest : destination userspace address
-+ * @len : length to copy to destination
-+ *
-+ * Should be protected by get_subbuf/put_subbuf.
-+ * access_ok() must have been performed on dest addresses prior to calling
-+ * this function.
-+ * Returns -EFAULT on error, 0 if ok.
-+ */
-+int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
-+ size_t offset, void __user *dest, size_t len)
-+{
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ size_t index;
-+ ssize_t pagecpy;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ unsigned long sb_bindex, id;
-+
-+ offset &= chanb->buf_size - 1;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ if (unlikely(!len))
-+ return 0;
-+ for (;;) {
-+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
-+ id = bufb->buf_rsb.id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ if (__copy_to_user(dest,
-+ rpages->p[index].virt + (offset & ~PAGE_MASK),
-+ pagecpy))
-+ return -EFAULT;
-+ len -= pagecpy;
-+ if (likely(!len))
-+ break;
-+ dest += pagecpy;
-+ offset += pagecpy;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ /*
-+ * Underlying layer should never ask for reads across
-+ * subbuffers.
-+ */
-+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
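As a usage note, here is a hedged sketch of a reader-side helper honouring the access_ok() contract documented above. The function name my_read_to_user and its parameters are hypothetical; only __lib_ring_buffer_copy_to_user() comes from this file, and the sketch assumes the surrounding file's includes (3.10-era access_ok() with a VERIFY_WRITE argument).

/* Sketch only: my_read_to_user and its arguments are hypothetical. */
static ssize_t my_read_to_user(struct lib_ring_buffer_backend *bufb,
			       size_t read_offset,
			       char __user *ubuf, size_t count)
{
	/* Caller contract above: check the destination before copying. */
	if (!access_ok(VERIFY_WRITE, ubuf, count))
		return -EFAULT;
	if (__lib_ring_buffer_copy_to_user(bufb, read_offset, ubuf, count))
		return -EFAULT;
	return count;
}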
-+
-+/**
-+ * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
-+ * @bufb : buffer backend
-+ * @offset : offset within the buffer
-+ * @dest : destination address
-+ * @len : destination's length
-+ *
-+ * Should be protected by get_subbuf/put_subbuf.
-+ * Returns the string's length.
-+ */
-+int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
-+ void *dest, size_t len)
-+{
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ size_t index;
-+ ssize_t pagecpy, pagelen, strpagelen, orig_offset;
-+ char *str;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ unsigned long sb_bindex, id;
-+
-+ offset &= chanb->buf_size - 1;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ orig_offset = offset;
-+ for (;;) {
-+ id = bufb->buf_rsb.id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
-+ pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
-+ strpagelen = strnlen(str, pagelen);
-+ if (len) {
-+ pagecpy = min_t(size_t, len, strpagelen);
-+ if (dest) {
-+ memcpy(dest, str, pagecpy);
-+ dest += pagecpy;
-+ }
-+ len -= pagecpy;
-+ }
-+ offset += strpagelen;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ if (strpagelen < pagelen)
-+ break;
-+ /*
-+ * Underlying layer should never ask for reads across
-+ * subbuffers.
-+ */
-+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
-+ }
-+ if (dest && len)
-+ ((char *)dest)[0] = 0;
-+ return offset - orig_offset;
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
-+
-+/**
-+ * lib_ring_buffer_read_get_page - Get a whole page to read from
-+ * @bufb : buffer backend
-+ * @offset : offset within the buffer
-+ * @virt : pointer to page address (output)
-+ *
-+ * Should be protected by get_subbuf/put_subbuf.
-+ * Returns a pointer to the struct page pointer of that page.
-+ */
-+struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
-+ size_t offset, void ***virt)
-+{
-+ size_t index;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ unsigned long sb_bindex, id;
-+
-+ offset &= chanb->buf_size - 1;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ id = bufb->buf_rsb.id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ *virt = &rpages->p[index].virt;
-+ return &rpages->p[index].page;
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
-+
-+/**
-+ * lib_ring_buffer_read_offset_address - get address of a buffer location
-+ * @bufb : buffer backend
-+ * @offset : offset within the buffer.
-+ *
-+ * Return the address where a given offset is located (for read).
-+ * Should be used to get the current subbuffer header pointer. Given we know
-+ * it's never on a page boundary, it's safe to write directly to this address,
-+ * as long as the write is never bigger than a page size.
-+ */
-+void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
-+ size_t offset)
-+{
-+ size_t index;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ unsigned long sb_bindex, id;
-+
-+ offset &= chanb->buf_size - 1;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ id = bufb->buf_rsb.id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ return rpages->p[index].virt + (offset & ~PAGE_MASK);
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
-+
-+/**
-+ * lib_ring_buffer_offset_address - get address of a location within the buffer
-+ * @bufb : buffer backend
-+ * @offset : offset within the buffer.
-+ *
-+ * Return the address where a given offset is located.
-+ * Should be used to get the current subbuffer header pointer. Given we know
-+ * it's always at the beginning of a page, it's safe to write directly to this
-+ * address, as long as the write is never bigger than a page size.
-+ */
-+void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
-+ size_t offset)
-+{
-+ size_t sbidx, index;
-+ struct lib_ring_buffer_backend_pages *rpages;
-+ struct channel_backend *chanb = &bufb->chan->backend;
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ unsigned long sb_bindex, id;
-+
-+ offset &= chanb->buf_size - 1;
-+ sbidx = offset >> chanb->subbuf_size_order;
-+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
-+ id = bufb->buf_wsb[sbidx].id;
-+ sb_bindex = subbuffer_id_get_index(config, id);
-+ rpages = bufb->array[sb_bindex];
-+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, id));
-+ return rpages->p[index].virt + (offset & ~PAGE_MASK);
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-new file mode 100644
-index 0000000..802f5cd
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-@@ -0,0 +1,1721 @@
-+/*
-+ * ring_buffer_frontend.c
-+ *
-+ * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
-+ * recorder (overwrite) modes. See thesis:
-+ *
-+ * Desnoyers, Mathieu (2009), "Low-Impact Operating System Tracing", Ph.D.
-+ * dissertation, Ecole Polytechnique de Montreal.
-+ * http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf
-+ *
-+ * - Algorithm presentation in Chapter 5:
-+ * "Lockless Multi-Core High-Throughput Buffering".
-+ * - Algorithm formal verification in Section 8.6:
-+ * "Formal verification of LTTng"
-+ *
-+ * Author:
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Inspired from LTT and RelayFS:
-+ * Karim Yaghmour <karim@opersys.com>
-+ * Tom Zanussi <zanussi@us.ibm.com>
-+ * Bob Wisniewski <bob@watson.ibm.com>
-+ * And from K42 :
-+ * Bob Wisniewski <bob@watson.ibm.com>
-+ *
-+ * Buffer reader semantics:
-+ *
-+ * - get_subbuf_size
-+ * while buffer is not finalized and empty
-+ * - get_subbuf
-+ * - if return value != 0, continue
-+ * - splice one subbuffer worth of data to a pipe
-+ * - splice the data from pipe to disk/network
-+ * - put_subbuf
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/delay.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
-+
-+#include "../../wrapper/ringbuffer/config.h"
-+#include "../../wrapper/ringbuffer/backend.h"
-+#include "../../wrapper/ringbuffer/frontend.h"
-+#include "../../wrapper/ringbuffer/iterator.h"
-+#include "../../wrapper/ringbuffer/nohz.h"
-+
-+/*
-+ * Internal structure representing offsets to use at a sub-buffer switch.
-+ */
-+struct switch_offsets {
-+ unsigned long begin, end, old;
-+ size_t pre_header_padding, size;
-+ unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
-+ switch_old_end:1;
-+};
-+
-+#ifdef CONFIG_NO_HZ
-+enum tick_nohz_val {
-+ TICK_NOHZ_STOP,
-+ TICK_NOHZ_FLUSH,
-+ TICK_NOHZ_RESTART,
-+};
-+
-+static ATOMIC_NOTIFIER_HEAD(tick_nohz_notifier);
-+#endif /* CONFIG_NO_HZ */
-+
-+static DEFINE_PER_CPU(spinlock_t, ring_buffer_nohz_lock);
-+
-+DEFINE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
-+EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
-+
-+static
-+void lib_ring_buffer_print_errors(struct channel *chan,
-+ struct lib_ring_buffer *buf, int cpu);
-+
-+/*
-+ * Must be called under cpu hotplug protection.
-+ */
-+void lib_ring_buffer_free(struct lib_ring_buffer *buf)
-+{
-+ struct channel *chan = buf->backend.chan;
-+
-+ lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
-+ kfree(buf->commit_hot);
-+ kfree(buf->commit_cold);
-+
-+ lib_ring_buffer_backend_free(&buf->backend);
-+}
-+
-+/**
-+ * lib_ring_buffer_reset - Reset ring buffer to initial values.
-+ * @buf: Ring buffer.
-+ *
-+ * Effectively empty the ring buffer. Should be called when the buffer is not
-+ * used for writing. The ring buffer can be opened for reading, but the reader
-+ * should not be using the iterator concurrently with reset. The previous
-+ * current iterator record is reset.
-+ */
-+void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned int i;
-+
-+ /*
-+ * Reset iterator first. It will put the subbuffer if it currently holds
-+ * it.
-+ */
-+ lib_ring_buffer_iterator_reset(buf);
-+ v_set(config, &buf->offset, 0);
-+ for (i = 0; i < chan->backend.num_subbuf; i++) {
-+ v_set(config, &buf->commit_hot[i].cc, 0);
-+ v_set(config, &buf->commit_hot[i].seq, 0);
-+ v_set(config, &buf->commit_cold[i].cc_sb, 0);
-+ }
-+ atomic_long_set(&buf->consumed, 0);
-+ atomic_set(&buf->record_disabled, 0);
-+ v_set(config, &buf->last_tsc, 0);
-+ lib_ring_buffer_backend_reset(&buf->backend);
-+ /* Don't reset number of active readers */
-+ v_set(config, &buf->records_lost_full, 0);
-+ v_set(config, &buf->records_lost_wrap, 0);
-+ v_set(config, &buf->records_lost_big, 0);
-+ v_set(config, &buf->records_count, 0);
-+ v_set(config, &buf->records_overrun, 0);
-+ buf->finalized = 0;
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_reset);
-+
-+/**
-+ * channel_reset - Reset channel to initial values.
-+ * @chan: Channel.
-+ *
-+ * Effectively empty the channel. Should be called when the channel is not used
-+ * for writing. The channel can be opened for reading, but the reader should not
-+ * be using the iterator concurrently with reset. The previous current iterator
-+ * record is reset.
-+ */
-+void channel_reset(struct channel *chan)
-+{
-+ /*
-+ * Reset iterators first. Will put the subbuffer if held for reading.
-+ */
-+ channel_iterator_reset(chan);
-+ atomic_set(&chan->record_disabled, 0);
-+ /* Don't reset commit_count_mask, still valid */
-+ channel_backend_reset(&chan->backend);
-+ /* Don't reset switch/read timer interval */
-+ /* Don't reset notifiers and notifier enable bits */
-+ /* Don't reset reader reference count */
-+}
-+EXPORT_SYMBOL_GPL(channel_reset);
-+
-+/*
-+ * Must be called under cpu hotplug protection.
-+ */
-+int lib_ring_buffer_create(struct lib_ring_buffer *buf,
-+ struct channel_backend *chanb, int cpu)
-+{
-+ const struct lib_ring_buffer_config *config = chanb->config;
-+ struct channel *chan = container_of(chanb, struct channel, backend);
-+ void *priv = chanb->priv;
-+ size_t subbuf_header_size;
-+ u64 tsc;
-+ int ret;
-+
-+ /* Test for cpu hotplug */
-+ if (buf->backend.allocated)
-+ return 0;
-+
-+ /*
-+ * Paranoia: per cpu dynamic allocation is not officially documented as
-+ * zeroing the memory, so let's do it here too, just in case.
-+ */
-+ memset(buf, 0, sizeof(*buf));
-+
-+ ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend, cpu);
-+ if (ret)
-+ return ret;
-+
-+ buf->commit_hot =
-+ kzalloc_node(ALIGN(sizeof(*buf->commit_hot)
-+ * chan->backend.num_subbuf,
-+ 1 << INTERNODE_CACHE_SHIFT),
-+ GFP_KERNEL, cpu_to_node(max(cpu, 0)));
-+ if (!buf->commit_hot) {
-+ ret = -ENOMEM;
-+ goto free_chanbuf;
-+ }
-+
-+ buf->commit_cold =
-+ kzalloc_node(ALIGN(sizeof(*buf->commit_cold)
-+ * chan->backend.num_subbuf,
-+ 1 << INTERNODE_CACHE_SHIFT),
-+ GFP_KERNEL, cpu_to_node(max(cpu, 0)));
-+ if (!buf->commit_cold) {
-+ ret = -ENOMEM;
-+ goto free_commit;
-+ }
-+
-+ init_waitqueue_head(&buf->read_wait);
-+ init_waitqueue_head(&buf->write_wait);
-+ raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
-+
-+ /*
-+ * Write the subbuffer header for first subbuffer so we know the total
-+ * duration of data gathering.
-+ */
-+ subbuf_header_size = config->cb.subbuffer_header_size();
-+ v_set(config, &buf->offset, subbuf_header_size);
-+ subbuffer_id_clear_noref(config, &buf->backend.buf_wsb[0].id);
-+ tsc = config->cb.ring_buffer_clock_read(buf->backend.chan);
-+ config->cb.buffer_begin(buf, tsc, 0);
-+ v_add(config, subbuf_header_size, &buf->commit_hot[0].cc);
-+
-+ if (config->cb.buffer_create) {
-+ ret = config->cb.buffer_create(buf, priv, cpu, chanb->name);
-+ if (ret)
-+ goto free_init;
-+ }
-+
-+ /*
-+ * Ensure the buffer is ready before setting it to allocated and setting
-+ * the cpumask.
-+ * Used for cpu hotplug vs cpumask iteration.
-+ */
-+ smp_wmb();
-+ buf->backend.allocated = 1;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ CHAN_WARN_ON(chan, cpumask_test_cpu(cpu,
-+ chan->backend.cpumask));
-+ cpumask_set_cpu(cpu, chan->backend.cpumask);
-+ }
-+
-+ return 0;
-+
-+ /* Error handling */
-+free_init:
-+ kfree(buf->commit_cold);
-+free_commit:
-+ kfree(buf->commit_hot);
-+free_chanbuf:
-+ lib_ring_buffer_backend_free(&buf->backend);
-+ return ret;
-+}
-+
-+static void switch_buffer_timer(unsigned long data)
-+{
-+ struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ /*
-+ * Only flush buffers periodically if readers are active.
-+ */
-+ if (atomic_long_read(&buf->active_readers))
-+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ mod_timer_pinned(&buf->switch_timer,
-+ jiffies + chan->switch_timer_interval);
-+ else
-+ mod_timer(&buf->switch_timer,
-+ jiffies + chan->switch_timer_interval);
-+}
-+
-+/*
-+ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
-+ */
-+static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ if (!chan->switch_timer_interval || buf->switch_timer_enabled)
-+ return;
-+ init_timer(&buf->switch_timer);
-+ buf->switch_timer.function = switch_buffer_timer;
-+ buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
-+ buf->switch_timer.data = (unsigned long)buf;
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ add_timer_on(&buf->switch_timer, buf->backend.cpu);
-+ else
-+ add_timer(&buf->switch_timer);
-+ buf->switch_timer_enabled = 1;
-+}
-+
-+/*
-+ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
-+ */
-+static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
-+{
-+ struct channel *chan = buf->backend.chan;
-+
-+ if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
-+ return;
-+
-+ del_timer_sync(&buf->switch_timer);
-+ buf->switch_timer_enabled = 0;
-+}
-+
-+/*
-+ * Polling timer to check the channels for data.
-+ */
-+static void read_buffer_timer(unsigned long data)
-+{
-+ struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ CHAN_WARN_ON(chan, !buf->backend.allocated);
-+
-+ if (atomic_long_read(&buf->active_readers)
-+ && lib_ring_buffer_poll_deliver(config, buf, chan)) {
-+ wake_up_interruptible(&buf->read_wait);
-+ wake_up_interruptible(&chan->read_wait);
-+ }
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ mod_timer_pinned(&buf->read_timer,
-+ jiffies + chan->read_timer_interval);
-+ else
-+ mod_timer(&buf->read_timer,
-+ jiffies + chan->read_timer_interval);
-+}
-+
-+/*
-+ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
-+ */
-+static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
-+ || !chan->read_timer_interval
-+ || buf->read_timer_enabled)
-+ return;
-+
-+ init_timer(&buf->read_timer);
-+ buf->read_timer.function = read_buffer_timer;
-+ buf->read_timer.expires = jiffies + chan->read_timer_interval;
-+ buf->read_timer.data = (unsigned long)buf;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ add_timer_on(&buf->read_timer, buf->backend.cpu);
-+ else
-+ add_timer(&buf->read_timer);
-+ buf->read_timer_enabled = 1;
-+}
-+
-+/*
-+ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
-+ */
-+static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
-+ || !chan->read_timer_interval
-+ || !buf->read_timer_enabled)
-+ return;
-+
-+ del_timer_sync(&buf->read_timer);
-+ /*
-+ * do one more check to catch data that has been written in the last
-+ * timer period.
-+ */
-+ if (lib_ring_buffer_poll_deliver(config, buf, chan)) {
-+ wake_up_interruptible(&buf->read_wait);
-+ wake_up_interruptible(&chan->read_wait);
-+ }
-+ buf->read_timer_enabled = 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+/**
-+ * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
-+ * @nb: notifier block
-+ * @action: hotplug action to take
-+ * @hcpu: CPU number
-+ *
-+ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
-+ */
-+static
-+int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
-+ unsigned long action,
-+ void *hcpu)
-+{
-+ unsigned int cpu = (unsigned long)hcpu;
-+ struct channel *chan = container_of(nb, struct channel,
-+ cpu_hp_notifier);
-+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ if (!chan->cpu_hp_enable)
-+ return NOTIFY_DONE;
-+
-+ CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-+
-+ switch (action) {
-+ case CPU_DOWN_FAILED:
-+ case CPU_DOWN_FAILED_FROZEN:
-+ case CPU_ONLINE:
-+ case CPU_ONLINE_FROZEN:
-+ wake_up_interruptible(&chan->hp_wait);
-+ lib_ring_buffer_start_switch_timer(buf);
-+ lib_ring_buffer_start_read_timer(buf);
-+ return NOTIFY_OK;
-+
-+ case CPU_DOWN_PREPARE:
-+ case CPU_DOWN_PREPARE_FROZEN:
-+ lib_ring_buffer_stop_switch_timer(buf);
-+ lib_ring_buffer_stop_read_timer(buf);
-+ return NOTIFY_OK;
-+
-+ case CPU_DEAD:
-+ case CPU_DEAD_FROZEN:
-+ /*
-+ * Performing a buffer switch on a remote CPU. Performed by
-+ * the CPU responsible for doing the hotunplug after the target
-+ * CPU stopped running completely. Ensures that all data
-+ * from that remote CPU is flushed.
-+ */
-+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-+ return NOTIFY_OK;
-+
-+ default:
-+ return NOTIFY_DONE;
-+ }
-+}
-+#endif
-+
-+#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
-+/*
-+ * For per-cpu buffers, call the reader wakeups before switching the buffer, so
-+ * that wake-up-tracing generated events are flushed before going idle (in
-+ * tick_nohz). We test if the spinlock is locked to deal with the race where
-+ * readers try to sample the ring buffer before we perform the switch. We let
-+ * the readers retry in that case. If there is data in the buffer, the wake up
-+ * is going to forbid the CPU running the reader thread from going idle.
-+ */
-+static int notrace ring_buffer_tick_nohz_callback(struct notifier_block *nb,
-+ unsigned long val,
-+ void *data)
-+{
-+ struct channel *chan = container_of(nb, struct channel,
-+ tick_nohz_notifier);
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ struct lib_ring_buffer *buf;
-+ int cpu = smp_processor_id();
-+
-+ if (config->alloc != RING_BUFFER_ALLOC_PER_CPU) {
-+ /*
-+ * We don't support keeping the system idle with global buffers
-+ * and streaming active. In order to do so, we would need to
-+ * sample a non-nohz-cpumask racelessly with the nohz updates
-+ * without adding synchronization overhead to nohz. Leave this
-+ * use-case out for now.
-+ */
-+ return 0;
-+ }
-+
-+ buf = channel_get_ring_buffer(config, chan, cpu);
-+ switch (val) {
-+ case TICK_NOHZ_FLUSH:
-+ raw_spin_lock(&buf->raw_tick_nohz_spinlock);
-+ if (config->wakeup == RING_BUFFER_WAKEUP_BY_TIMER
-+ && chan->read_timer_interval
-+ && atomic_long_read(&buf->active_readers)
-+ && (lib_ring_buffer_poll_deliver(config, buf, chan)
-+ || lib_ring_buffer_pending_data(config, buf, chan))) {
-+ wake_up_interruptible(&buf->read_wait);
-+ wake_up_interruptible(&chan->read_wait);
-+ }
-+ if (chan->switch_timer_interval)
-+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-+ raw_spin_unlock(&buf->raw_tick_nohz_spinlock);
-+ break;
-+ case TICK_NOHZ_STOP:
-+ spin_lock(&__get_cpu_var(ring_buffer_nohz_lock));
-+ lib_ring_buffer_stop_switch_timer(buf);
-+ lib_ring_buffer_stop_read_timer(buf);
-+ spin_unlock(&__get_cpu_var(ring_buffer_nohz_lock));
-+ break;
-+ case TICK_NOHZ_RESTART:
-+ spin_lock(&__get_cpu_var(ring_buffer_nohz_lock));
-+ lib_ring_buffer_start_read_timer(buf);
-+ lib_ring_buffer_start_switch_timer(buf);
-+ spin_unlock(&__get_cpu_var(ring_buffer_nohz_lock));
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+void notrace lib_ring_buffer_tick_nohz_flush(void)
-+{
-+ atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_FLUSH,
-+ NULL);
-+}
-+
-+void notrace lib_ring_buffer_tick_nohz_stop(void)
-+{
-+ atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_STOP,
-+ NULL);
-+}
-+
-+void notrace lib_ring_buffer_tick_nohz_restart(void)
-+{
-+ atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_RESTART,
-+ NULL);
-+}
-+#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
-+
-+/*
-+ * Holds CPU hotplug.
-+ */
-+static void channel_unregister_notifiers(struct channel *chan)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ int cpu;
-+
-+ channel_iterator_unregister_notifiers(chan);
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+#ifdef CONFIG_NO_HZ
-+ /*
-+ * Remove the nohz notifier first, so we are certain we stop
-+ * the timers.
-+ */
-+ atomic_notifier_chain_unregister(&tick_nohz_notifier,
-+ &chan->tick_nohz_notifier);
-+ /*
-+ * ring_buffer_nohz_lock will not be needed below, because
-+ * we just removed the notifiers, which were the only source of
-+ * concurrency.
-+ */
-+#endif /* CONFIG_NO_HZ */
-+#ifdef CONFIG_HOTPLUG_CPU
-+ get_online_cpus();
-+ chan->cpu_hp_enable = 0;
-+ for_each_online_cpu(cpu) {
-+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-+ cpu);
-+ lib_ring_buffer_stop_switch_timer(buf);
-+ lib_ring_buffer_stop_read_timer(buf);
-+ }
-+ put_online_cpus();
-+ unregister_cpu_notifier(&chan->cpu_hp_notifier);
-+#else
-+ for_each_possible_cpu(cpu) {
-+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-+ cpu);
-+ lib_ring_buffer_stop_switch_timer(buf);
-+ lib_ring_buffer_stop_read_timer(buf);
-+ }
-+#endif
-+ } else {
-+ struct lib_ring_buffer *buf = chan->backend.buf;
-+
-+ lib_ring_buffer_stop_switch_timer(buf);
-+ lib_ring_buffer_stop_read_timer(buf);
-+ }
-+ channel_backend_unregister_notifiers(&chan->backend);
-+}
-+
-+static void channel_free(struct channel *chan)
-+{
-+ channel_iterator_free(chan);
-+ channel_backend_free(&chan->backend);
-+ kfree(chan);
-+}
-+
-+/**
-+ * channel_create - Create channel.
-+ * @config: ring buffer instance configuration
-+ * @name: name of the channel
-+ * @priv: ring buffer client private data
-+ * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
-+ * address mapping. It is used only by RING_BUFFER_STATIC
-+ * configuration. It can be set to NULL for other backends.
-+ * @subbuf_size: subbuffer size
-+ * @num_subbuf: number of subbuffers
-+ * @switch_timer_interval: Time interval (in us) to fill sub-buffers with
-+ * padding to let readers get those sub-buffers.
-+ * Used for live streaming.
-+ * @read_timer_interval: Time interval (in us) to wake up pending readers.
-+ *
-+ * Holds cpu hotplug.
-+ * Returns NULL on failure.
-+ */
-+struct channel *channel_create(const struct lib_ring_buffer_config *config,
-+ const char *name, void *priv, void *buf_addr,
-+ size_t subbuf_size,
-+ size_t num_subbuf, unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval)
-+{
-+ int ret, cpu;
-+ struct channel *chan;
-+
-+ if (lib_ring_buffer_check_config(config, switch_timer_interval,
-+ read_timer_interval))
-+ return NULL;
-+
-+ chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
-+ if (!chan)
-+ return NULL;
-+
-+ ret = channel_backend_init(&chan->backend, name, config, priv,
-+ subbuf_size, num_subbuf);
-+ if (ret)
-+ goto error;
-+
-+ ret = channel_iterator_init(chan);
-+ if (ret)
-+ goto error_free_backend;
-+
-+ chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
-+ chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
-+ chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
-+ kref_init(&chan->ref);
-+ init_waitqueue_head(&chan->read_wait);
-+ init_waitqueue_head(&chan->hp_wait);
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
-+ /* Only benefit from NO_HZ idle with per-cpu buffers for now. */
-+ chan->tick_nohz_notifier.notifier_call =
-+ ring_buffer_tick_nohz_callback;
-+ chan->tick_nohz_notifier.priority = ~0U;
-+ atomic_notifier_chain_register(&tick_nohz_notifier,
-+ &chan->tick_nohz_notifier);
-+#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
-+
-+ /*
-+ * Without CPU hotplug support, if the ring buffer is allocated in an
-+ * early initcall, it will not be notified of secondary cpus. In that
-+ * case, we need to allocate for all possible cpus.
-+ */
-+#ifdef CONFIG_HOTPLUG_CPU
-+ chan->cpu_hp_notifier.notifier_call =
-+ lib_ring_buffer_cpu_hp_callback;
-+ chan->cpu_hp_notifier.priority = 6;
-+ register_cpu_notifier(&chan->cpu_hp_notifier);
-+
-+ get_online_cpus();
-+ for_each_online_cpu(cpu) {
-+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-+ cpu);
-+ spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
-+ lib_ring_buffer_start_switch_timer(buf);
-+ lib_ring_buffer_start_read_timer(buf);
-+ spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
-+ }
-+ chan->cpu_hp_enable = 1;
-+ put_online_cpus();
-+#else
-+ for_each_possible_cpu(cpu) {
-+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-+ cpu);
-+ spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
-+ lib_ring_buffer_start_switch_timer(buf);
-+ lib_ring_buffer_start_read_timer(buf);
-+ spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
-+ }
-+#endif
-+ } else {
-+ struct lib_ring_buffer *buf = chan->backend.buf;
-+
-+ lib_ring_buffer_start_switch_timer(buf);
-+ lib_ring_buffer_start_read_timer(buf);
-+ }
-+
-+ return chan;
-+
-+error_free_backend:
-+ channel_backend_free(&chan->backend);
-+error:
-+ kfree(chan);
-+ return NULL;
-+}
-+EXPORT_SYMBOL_GPL(channel_create);
-+
-+static
-+void channel_release(struct kref *kref)
-+{
-+ struct channel *chan = container_of(kref, struct channel, ref);
-+ channel_free(chan);
-+}
-+
-+/**
-+ * channel_destroy - Finalize, wait for q.s. and destroy channel.
-+ * @chan: channel to destroy
-+ *
-+ * Holds cpu hotplug.
-+ * Call "destroy" callback, finalize channels, and then decrement the
-+ * channel reference count. Note that when readers have completed data
-+ * consumption of finalized channels, get_subbuf() will return -ENODATA.
-+ * They should release their handle at that point. Returns the private
-+ * data pointer.
-+ */
-+void *channel_destroy(struct channel *chan)
-+{
-+ int cpu;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ void *priv;
-+
-+ channel_unregister_notifiers(chan);
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ /*
-+ * No need to hold cpu hotplug, because all notifiers have been
-+ * unregistered.
-+ */
-+ for_each_channel_cpu(cpu, chan) {
-+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-+ cpu);
-+
-+ if (config->cb.buffer_finalize)
-+ config->cb.buffer_finalize(buf,
-+ chan->backend.priv,
-+ cpu);
-+ if (buf->backend.allocated)
-+ lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
-+ /*
-+ * Perform flush before writing to finalized.
-+ */
-+ smp_wmb();
-+ ACCESS_ONCE(buf->finalized) = 1;
-+ wake_up_interruptible(&buf->read_wait);
-+ }
-+ } else {
-+ struct lib_ring_buffer *buf = chan->backend.buf;
-+
-+ if (config->cb.buffer_finalize)
-+ config->cb.buffer_finalize(buf, chan->backend.priv, -1);
-+ if (buf->backend.allocated)
-+ lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
-+ /*
-+ * Perform flush before writing to finalized.
-+ */
-+ smp_wmb();
-+ ACCESS_ONCE(buf->finalized) = 1;
-+ wake_up_interruptible(&buf->read_wait);
-+ }
-+ ACCESS_ONCE(chan->finalized) = 1;
-+ wake_up_interruptible(&chan->hp_wait);
-+ wake_up_interruptible(&chan->read_wait);
-+ priv = chan->backend.priv;
-+ kref_put(&chan->ref, channel_release);
-+ return priv;
-+}
-+EXPORT_SYMBOL_GPL(channel_destroy);
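A minimal lifecycle sketch, assuming a caller-provided lib_ring_buffer_config named my_config and private data my_priv (neither name appears in the patch), showing only the channel_create()/channel_destroy() pairing documented above.

/* Sketch only: my_config, my_priv and the sizes below are hypothetical. */
static struct channel *my_chan;

static int my_tracer_start(void)
{
	my_chan = channel_create(&my_config, "my-chan", my_priv,
				 NULL,		/* buf_addr: unused by this backend */
				 4096,		/* subbuf_size (bytes) */
				 8,		/* num_subbuf */
				 0,		/* switch_timer_interval (us) */
				 100000);	/* read_timer_interval (us) */
	return my_chan ? 0 : -ENOMEM;
}

static void my_tracer_stop(void)
{
	void *priv = channel_destroy(my_chan);	/* returns the client private data */

	/* priv == my_priv; release it here if it was dynamically allocated. */
	(void)priv;
}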
-+
-+struct lib_ring_buffer *channel_get_ring_buffer(
-+ const struct lib_ring_buffer_config *config,
-+ struct channel *chan, int cpu)
-+{
-+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
-+ return chan->backend.buf;
-+ else
-+ return per_cpu_ptr(chan->backend.buf, cpu);
-+}
-+EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
-+
-+int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
-+{
-+ struct channel *chan = buf->backend.chan;
-+
-+ if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
-+ return -EBUSY;
-+ kref_get(&chan->ref);
-+ smp_mb__after_atomic_inc();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
-+
-+void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
-+{
-+ struct channel *chan = buf->backend.chan;
-+
-+ CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
-+ smp_mb__before_atomic_dec();
-+ atomic_long_dec(&buf->active_readers);
-+ kref_put(&chan->ref, channel_release);
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
-+
-+/*
-+ * Promote compiler barrier to a smp_mb().
-+ * For the specific ring buffer case, this IPI call should be removed if the
-+ * architecture does not reorder writes. This should eventually be provided by
-+ * a separate architecture-specific infrastructure.
-+ */
-+static void remote_mb(void *info)
-+{
-+ smp_mb();
-+}
-+
-+/**
-+ * lib_ring_buffer_snapshot - save subbuffer position snapshot (for read)
-+ * @buf: ring buffer
-+ * @consumed: consumed count indicating the position where to read
-+ * @produced: produced count, indicating the position where to stop reading
-+ *
-+ * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
-+ * data to read at consumed position, or 0 if the get operation succeeds.
-+ * Busy-loop trying to get data if the tick_nohz sequence lock is held.
-+ */
-+int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
-+ unsigned long *consumed, unsigned long *produced)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long consumed_cur, write_offset;
-+ int finalized;
-+
-+retry:
-+ finalized = ACCESS_ONCE(buf->finalized);
-+ /*
-+ * Read finalized before counters.
-+ */
-+ smp_rmb();
-+ consumed_cur = atomic_long_read(&buf->consumed);
-+ /*
-+ * No need to issue a memory barrier between consumed count read and
-+ * write offset read, because consumed count can only change
-+ * concurrently in overwrite mode, and we keep a sequence counter
-+ * identifier derived from the write offset to check we are getting
-+ * the same sub-buffer we are expecting (the sub-buffers are atomically
-+ * "tagged" upon writes, tags are checked upon read).
-+ */
-+ write_offset = v_read(config, &buf->offset);
-+
-+ /*
-+ * Check that we are not about to read the same subbuffer in
-+ * which the writer head is.
-+ */
-+ if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
-+ == 0)
-+ goto nodata;
-+
-+ *consumed = consumed_cur;
-+ *produced = subbuf_trunc(write_offset, chan);
-+
-+ return 0;
-+
-+nodata:
-+ /*
-+ * The memory barriers __wait_event()/wake_up_interruptible() take care
-+ * of "raw_spin_is_locked" memory ordering.
-+ */
-+ if (finalized)
-+ return -ENODATA;
-+ else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
-+ goto retry;
-+ else
-+ return -EAGAIN;
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_snapshot);
-+
-+/**
-+ * lib_ring_buffer_move_consumer - move consumed counter forward
-+ * @buf: ring buffer
-+ * @consumed_new: new consumed count value
-+ *
-+ * Should only be called from consumer context.
-+ */
-+void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
-+ unsigned long consumed_new)
-+{
-+ struct lib_ring_buffer_backend *bufb = &buf->backend;
-+ struct channel *chan = bufb->chan;
-+ unsigned long consumed;
-+
-+ CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
-+
-+ /*
-+ * Only push the consumed value forward.
-+ * If the consumed cmpxchg fails, this is because we have been pushed by
-+ * the writer in flight recorder mode.
-+ */
-+ consumed = atomic_long_read(&buf->consumed);
-+ while ((long) consumed - (long) consumed_new < 0)
-+ consumed = atomic_long_cmpxchg(&buf->consumed, consumed,
-+ consumed_new);
-+ /* Wake-up the metadata producer */
-+ wake_up_interruptible(&buf->write_wait);
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
-+
-+/**
-+ * lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
-+ * @buf: ring buffer
-+ * @consumed: consumed count indicating the position where to read
-+ *
-+ * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
-+ * data to read at consumed position, or 0 if the get operation succeeds.
-+ * Busy-loop trying to get data if the tick_nohz sequence lock is held.
-+ */
-+int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
-+ unsigned long consumed)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
-+ int ret;
-+ int finalized;
-+
-+retry:
-+ finalized = ACCESS_ONCE(buf->finalized);
-+ /*
-+ * Read finalized before counters.
-+ */
-+ smp_rmb();
-+ consumed_cur = atomic_long_read(&buf->consumed);
-+ consumed_idx = subbuf_index(consumed, chan);
-+ commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
-+ /*
-+ * Make sure we read the commit count before reading the buffer
-+ * data and the write offset. Correct consumed offset ordering
-+ * wrt commit count is ensured by the use of cmpxchg to update
-+ * the consumed offset.
-+ * smp_call_function_single can fail if the remote CPU is offline,
-+ * this is OK because then there is no wmb to execute there.
-+ * If our thread is executing on the same CPU as the one the buffer
-+ * belongs to, we don't have to synchronize it at all. If we are
-+ * migrated, the scheduler will take care of the memory barriers.
-+ * Normally, smp_call_function_single() should ensure program order when
-+ * executing the remote function, which implies that it surrounds the
-+ * function execution with :
-+ * smp_mb()
-+ * send IPI
-+ * csd_lock_wait
-+ * recv IPI
-+ * smp_mb()
-+ * exec. function
-+ * smp_mb()
-+ * csd unlock
-+ * smp_mb()
-+ *
-+ * However, smp_call_function_single() does not seem to clearly execute
-+ * such barriers. It depends on spinlock semantics to provide the barrier
-+ * before executing the IPI and, when busy-looping, csd_lock_wait only
-+ * executes smp_mb() when it has to wait for the other CPU.
-+ *
-+ * I don't trust this code. Therefore, let's add the smp_mb() sequence
-+ * required ourselves, even if duplicated. It has no performance impact
-+ * anyway.
-+ *
-+ * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
-+ * read and write vs write. They do not ensure core synchronization. We
-+ * really have to ensure total order between the 3 barriers running on
-+ * the 2 CPUs.
-+ */
-+ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-+ if (config->sync == RING_BUFFER_SYNC_PER_CPU
-+ && config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ if (raw_smp_processor_id() != buf->backend.cpu) {
-+ /* Total order with IPI handler smp_mb() */
-+ smp_mb();
-+ smp_call_function_single(buf->backend.cpu,
-+ remote_mb, NULL, 1);
-+ /* Total order with IPI handler smp_mb() */
-+ smp_mb();
-+ }
-+ } else {
-+ /* Total order with IPI handler smp_mb() */
-+ smp_mb();
-+ smp_call_function(remote_mb, NULL, 1);
-+ /* Total order with IPI handler smp_mb() */
-+ smp_mb();
-+ }
-+ } else {
-+ /*
-+ * Local rmb to match the remote wmb to read the commit count
-+ * before the buffer data and the write offset.
-+ */
-+ smp_rmb();
-+ }
-+
-+ write_offset = v_read(config, &buf->offset);
-+
-+ /*
-+ * Check that the buffer we are getting is after or at consumed_cur
-+ * position.
-+ */
-+ if ((long) subbuf_trunc(consumed, chan)
-+ - (long) subbuf_trunc(consumed_cur, chan) < 0)
-+ goto nodata;
-+
-+ /*
-+ * Check that the subbuffer we are trying to consume has been
-+ * already fully committed.
-+ */
-+ if (((commit_count - chan->backend.subbuf_size)
-+ & chan->commit_count_mask)
-+ - (buf_trunc(consumed_cur, chan)
-+ >> chan->backend.num_subbuf_order)
-+ != 0)
-+ goto nodata;
-+
-+ /*
-+ * Check that we are not about to read the same subbuffer in
-+ * which the writer head is.
-+ */
-+ if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
-+ == 0)
-+ goto nodata;
-+
-+ /*
-+ * Failure to get the subbuffer causes a busy-loop retry without going
-+ * to a wait queue. These are caused by short-lived race windows where
-+ * the writer is getting access to a subbuffer we were trying to get
-+ * access to. Also checks that the "consumed" buffer count we are
-+ * looking for matches the one contained in the subbuffer id.
-+ */
-+ ret = update_read_sb_index(config, &buf->backend, &chan->backend,
-+ consumed_idx, buf_trunc_val(consumed, chan));
-+ if (ret)
-+ goto retry;
-+ subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
-+
-+ buf->get_subbuf_consumed = consumed;
-+ buf->get_subbuf = 1;
-+
-+ return 0;
-+
-+nodata:
-+ /*
-+ * The memory barriers __wait_event()/wake_up_interruptible() take care
-+ * of "raw_spin_is_locked" memory ordering.
-+ */
-+ if (finalized)
-+ return -ENODATA;
-+ else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
-+ goto retry;
-+ else
-+ return -EAGAIN;
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_get_subbuf);
-+
-+/**
-+ * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
-+ * @buf: ring buffer
-+ */
-+void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
-+{
-+ struct lib_ring_buffer_backend *bufb = &buf->backend;
-+ struct channel *chan = bufb->chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long read_sb_bindex, consumed_idx, consumed;
-+
-+ CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
-+
-+ if (!buf->get_subbuf) {
-+ /*
-+ * Reader puts a subbuffer it did not get.
-+ */
-+ CHAN_WARN_ON(chan, 1);
-+ return;
-+ }
-+ consumed = buf->get_subbuf_consumed;
-+ buf->get_subbuf = 0;
-+
-+ /*
-+ * Clear the records_unread counter. (overruns counter)
-+ * Can still be non-zero if a file reader simply grabbed the data
-+ * without using iterators.
-+ * Can be below zero if an iterator is used on a snapshot more than
-+ * once.
-+ */
-+ read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
-+ v_add(config, v_read(config,
-+ &bufb->array[read_sb_bindex]->records_unread),
-+ &bufb->records_read);
-+ v_set(config, &bufb->array[read_sb_bindex]->records_unread, 0);
-+ CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
-+ && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
-+ subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
-+
-+ /*
-+ * Exchange the reader subbuffer with the one we put in its place in the
-+ * writer subbuffer table. Expect the original consumed count. If
-+ * update_read_sb_index fails, this is because the writer updated the
-+ * subbuffer concurrently. We should therefore keep the subbuffer we
-+ * currently have: it has become invalid to try reading this sub-buffer
-+ * consumed count value anyway.
-+ */
-+ consumed_idx = subbuf_index(consumed, chan);
-+ update_read_sb_index(config, &buf->backend, &chan->backend,
-+ consumed_idx, buf_trunc_val(consumed, chan));
-+ /*
-+ * update_read_sb_index return value ignored. Don't exchange sub-buffer
-+ * if the writer concurrently updated it.
-+ */
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_put_subbuf);
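Tying the consumer-side entry points above together, here is a hedged polling-reader sketch. The name my_drain_buffer, the dest parameter, and the whole-sub-buffer copy are assumptions made for illustration: a real reader obtains the packet's content size from client callbacks not shown here, advances the consumed position with subbuf_align(), and sleeps on read_wait instead of busy-polling.

/* Sketch only: copies whole sub-buffers and polls instead of sleeping. */
static int my_drain_buffer(struct lib_ring_buffer *buf, struct channel *chan,
			   void *dest /* at least chan->backend.subbuf_size bytes */)
{
	unsigned long consumed, produced;
	int ret;

	ret = lib_ring_buffer_open_read(buf);
	if (ret)
		return ret;	/* -EBUSY: another reader is already active */

	while (!lib_ring_buffer_snapshot(buf, &consumed, &produced)) {
		while (consumed < produced) {
			ret = lib_ring_buffer_get_subbuf(buf, consumed);
			if (ret)
				break;	/* -EAGAIN or -ENODATA: retry or stop */
			lib_ring_buffer_read(&buf->backend, consumed, dest,
					     chan->backend.subbuf_size);
			lib_ring_buffer_put_subbuf(buf);
			/* consumed is sub-buffer aligned, so this matches subbuf_align(). */
			consumed += chan->backend.subbuf_size;
			lib_ring_buffer_move_consumer(buf, consumed);
		}
	}
	lib_ring_buffer_release_read(buf);
	return 0;
}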
-+
-+/*
-+ * cons_offset is an iterator on all subbuffer offsets between the reader
-+ * position and the writer position. (inclusive)
-+ */
-+static
-+void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ unsigned long cons_offset,
-+ int cpu)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long cons_idx, commit_count, commit_count_sb;
-+
-+ cons_idx = subbuf_index(cons_offset, chan);
-+ commit_count = v_read(config, &buf->commit_hot[cons_idx].cc);
-+ commit_count_sb = v_read(config, &buf->commit_cold[cons_idx].cc_sb);
-+
-+ if (subbuf_offset(commit_count, chan) != 0)
-+ printk(KERN_WARNING
-+ "ring buffer %s, cpu %d: "
-+ "commit count in subbuffer %lu,\n"
-+ "expecting multiples of %lu bytes\n"
-+ " [ %lu bytes committed, %lu bytes reader-visible ]\n",
-+ chan->backend.name, cpu, cons_idx,
-+ chan->backend.subbuf_size,
-+ commit_count, commit_count_sb);
-+
-+ printk(KERN_DEBUG "ring buffer: %s, cpu %d: %lu bytes committed\n",
-+ chan->backend.name, cpu, commit_count);
-+}
-+
-+static
-+void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ void *priv, int cpu)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long write_offset, cons_offset;
-+
-+ /*
-+ * Can be called in the error path of allocation when
-+ * trans_channel_data is not yet set.
-+ */
-+ if (!chan)
-+ return;
-+ /*
-+ * No need to order commit_count, write_offset and cons_offset reads
-+ * because we execute at teardown when no more writer nor reader
-+ * references are left.
-+ */
-+ write_offset = v_read(config, &buf->offset);
-+ cons_offset = atomic_long_read(&buf->consumed);
-+ if (write_offset != cons_offset)
-+ printk(KERN_DEBUG
-+ "ring buffer %s, cpu %d: "
-+ "non-consumed data\n"
-+ " [ %lu bytes written, %lu bytes read ]\n",
-+ chan->backend.name, cpu, write_offset, cons_offset);
-+
-+ for (cons_offset = atomic_long_read(&buf->consumed);
-+ (long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
-+ chan)
-+ - cons_offset) > 0;
-+ cons_offset = subbuf_align(cons_offset, chan))
-+ lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
-+ cpu);
-+}
-+
-+static
-+void lib_ring_buffer_print_errors(struct channel *chan,
-+ struct lib_ring_buffer *buf, int cpu)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ void *priv = chan->backend.priv;
-+
-+ printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
-+ "%lu records overrun\n",
-+ chan->backend.name, cpu,
-+ v_read(config, &buf->records_count),
-+ v_read(config, &buf->records_overrun));
-+
-+ if (v_read(config, &buf->records_lost_full)
-+ || v_read(config, &buf->records_lost_wrap)
-+ || v_read(config, &buf->records_lost_big))
-+ printk(KERN_WARNING
-+ "ring buffer %s, cpu %d: records were lost. Caused by:\n"
-+ " [ %lu buffer full, %lu nest buffer wrap-around, "
-+ "%lu event too big ]\n",
-+ chan->backend.name, cpu,
-+ v_read(config, &buf->records_lost_full),
-+ v_read(config, &buf->records_lost_wrap),
-+ v_read(config, &buf->records_lost_big));
-+
-+ lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
-+}
-+
-+/*
-+ * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
-+ *
-+ * Only executed when the buffer is finalized, in SWITCH_FLUSH.
-+ */
-+static
-+void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ struct switch_offsets *offsets,
-+ u64 tsc)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long oldidx = subbuf_index(offsets->old, chan);
-+ unsigned long commit_count;
-+
-+ config->cb.buffer_begin(buf, tsc, oldidx);
-+
-+ /*
-+ * Order all writes to buffer before the commit count update that will
-+ * determine that the subbuffer is full.
-+ */
-+ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-+ /*
-+ * Must write slot data before incrementing commit count. This
-+ * compiler barrier is upgraded into a smp_mb() by the IPI sent
-+ * by get_subbuf().
-+ */
-+ barrier();
-+ } else
-+ smp_wmb();
-+ v_add(config, config->cb.subbuffer_header_size(),
-+ &buf->commit_hot[oldidx].cc);
-+ commit_count = v_read(config, &buf->commit_hot[oldidx].cc);
-+ /* Check if the written buffer has to be delivered */
-+ lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
-+ commit_count, oldidx);
-+ lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
-+ offsets->old, commit_count,
-+ config->cb.subbuffer_header_size());
-+}
-+
-+/*
-+ * lib_ring_buffer_switch_old_end: switch old subbuffer
-+ *
-+ * Note : offset_old should never be 0 here. It is ok, because we never perform
-+ * buffer switch on an empty subbuffer in SWITCH_ACTIVE mode. The caller
-+ * increments the offset_old value when doing a SWITCH_FLUSH on an empty
-+ * subbuffer.
-+ */
-+static
-+void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ struct switch_offsets *offsets,
-+ u64 tsc)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
-+ unsigned long commit_count, padding_size, data_size;
-+
-+ data_size = subbuf_offset(offsets->old - 1, chan) + 1;
-+ padding_size = chan->backend.subbuf_size - data_size;
-+ subbuffer_set_data_size(config, &buf->backend, oldidx, data_size);
-+
-+ /*
-+ * Order all writes to buffer before the commit count update that will
-+ * determine that the subbuffer is full.
-+ */
-+ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-+ /*
-+ * Must write slot data before incrementing commit count. This
-+ * compiler barrier is upgraded into a smp_mb() by the IPI sent
-+ * by get_subbuf().
-+ */
-+ barrier();
-+ } else
-+ smp_wmb();
-+ v_add(config, padding_size, &buf->commit_hot[oldidx].cc);
-+ commit_count = v_read(config, &buf->commit_hot[oldidx].cc);
-+ lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
-+ commit_count, oldidx);
-+ lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
-+ offsets->old, commit_count,
-+ padding_size);
-+}
-+
-+/*
-+ * lib_ring_buffer_switch_new_start: Populate new subbuffer.
-+ *
-+ * This code can be executed unordered: writers may already have written to the
-+ * sub-buffer before this code gets executed, so beware. The commit makes sure
-+ * that this code is executed before the deliver of this sub-buffer.
-+ */
-+static
-+void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ struct switch_offsets *offsets,
-+ u64 tsc)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long beginidx = subbuf_index(offsets->begin, chan);
-+ unsigned long commit_count;
-+
-+ config->cb.buffer_begin(buf, tsc, beginidx);
-+
-+ /*
-+ * Order all writes to buffer before the commit count update that will
-+ * determine that the subbuffer is full.
-+ */
-+ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-+ /*
-+ * Must write slot data before incrementing commit count. This
-+ * compiler barrier is upgraded into a smp_mb() by the IPI sent
-+ * by get_subbuf().
-+ */
-+ barrier();
-+ } else
-+ smp_wmb();
-+ v_add(config, config->cb.subbuffer_header_size(),
-+ &buf->commit_hot[beginidx].cc);
-+ commit_count = v_read(config, &buf->commit_hot[beginidx].cc);
-+ /* Check if the written buffer has to be delivered */
-+ lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
-+ commit_count, beginidx);
-+ lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
-+ offsets->begin, commit_count,
-+ config->cb.subbuffer_header_size());
-+}
-+
-+/*
-+ * lib_ring_buffer_switch_new_end: finish switching current subbuffer
-+ *
-+ * The only remaining threads could be the ones with pending commits. They will
-+ * have to perform the delivery themselves.
-+ */
-+static
-+void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ struct switch_offsets *offsets,
-+ u64 tsc)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long endidx = subbuf_index(offsets->end - 1, chan);
-+ unsigned long commit_count, padding_size, data_size;
-+
-+ data_size = subbuf_offset(offsets->end - 1, chan) + 1;
-+ padding_size = chan->backend.subbuf_size - data_size;
-+ subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
-+
-+ /*
-+ * Order all writes to buffer before the commit count update that will
-+ * determine that the subbuffer is full.
-+ */
-+ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-+ /*
-+ * Must write slot data before incrementing commit count. This
-+ * compiler barrier is upgraded into a smp_mb() by the IPI sent
-+ * by get_subbuf().
-+ */
-+ barrier();
-+ } else
-+ smp_wmb();
-+ v_add(config, padding_size, &buf->commit_hot[endidx].cc);
-+ commit_count = v_read(config, &buf->commit_hot[endidx].cc);
-+ lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
-+ commit_count, endidx);
-+ lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-+ offsets->end, commit_count,
-+ padding_size);
-+}
-+
-+/*
-+ * Returns :
-+ * 0 if ok
-+ * !0 if execution must be aborted.
-+ */
-+static
-+int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
-+ struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ struct switch_offsets *offsets,
-+ u64 *tsc)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long off;
-+
-+ offsets->begin = v_read(config, &buf->offset);
-+ offsets->old = offsets->begin;
-+ offsets->switch_old_start = 0;
-+ off = subbuf_offset(offsets->begin, chan);
-+
-+ *tsc = config->cb.ring_buffer_clock_read(chan);
-+
-+ /*
-+ * Ensure we flush the header of an empty subbuffer when doing the
-+ * finalize (SWITCH_FLUSH). This ensures that we end up knowing the
-+ * total data gathering duration even if there were no records saved
-+ * after the last buffer switch.
-+ * In SWITCH_ACTIVE mode, switch the buffer when it contains events.
-+ * SWITCH_ACTIVE only flushes the current subbuffer, dealing with end of
-+ * subbuffer header as appropriate.
-+ * The next record that reserves space will be responsible for
-+ * populating the following subbuffer header. We choose not to populate
-+ * the next subbuffer header here because we want to be able to use
-+ * SWITCH_ACTIVE for periodical buffer flush and CPU tick_nohz stop
-+ * buffer flush, which must guarantee that all the buffer content
-+ * (records and header timestamps) are visible to the reader. This is
-+ * required for quiescence guarantees for the fusion merge.
-+ */
-+ if (mode == SWITCH_FLUSH || off > 0) {
-+ if (unlikely(off == 0)) {
-+ /*
-+ * The client does not save any header information.
-+ * Don't switch empty subbuffer on finalize, because it
-+ * is invalid to deliver a completely empty subbuffer.
-+ */
-+ if (!config->cb.subbuffer_header_size())
-+ return -1;
-+ /*
-+ * Need to write the subbuffer start header on finalize.
-+ */
-+ offsets->switch_old_start = 1;
-+ }
-+ offsets->begin = subbuf_align(offsets->begin, chan);
-+ } else
-+ return -1; /* we do not have to switch : buffer is empty */
-+ /* Note: old points to the next subbuf at offset 0 */
-+ offsets->end = offsets->begin;
-+ return 0;
-+}
-+
-+/*
-+ * Force a sub-buffer switch. This operation is completely reentrant: it can be
-+ * called while tracing is active with absolutely no lock held.
-+ *
-+ * Note, however, that as a v_cmpxchg is used for some atomic
-+ * operations, this function must be called from the CPU which owns the buffer
-+ * for an ACTIVE flush.
-+ */
-+void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ struct switch_offsets offsets;
-+ unsigned long oldidx;
-+ u64 tsc;
-+
-+ offsets.size = 0;
-+
-+ /*
-+ * Perform retryable operations.
-+ */
-+ do {
-+ if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
-+ &tsc))
-+ return; /* Switch not needed */
-+ } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
-+ != offsets.old);
-+
-+ /*
-+ * Atomically update last_tsc. This update races against concurrent
-+ * atomic updates, but the race will always cause supplementary full TSC
-+ * records, never the opposite (missing a full TSC record when it would
-+ * be needed).
-+ */
-+ save_last_tsc(config, buf, tsc);
-+
-+ /*
-+ * Push the reader if necessary
-+ */
-+ lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
-+
-+ oldidx = subbuf_index(offsets.old, chan);
-+ lib_ring_buffer_clear_noref(config, &buf->backend, oldidx);
-+
-+ /*
-+ * May need to populate header start on SWITCH_FLUSH.
-+ */
-+ if (offsets.switch_old_start) {
-+ lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc);
-+ offsets.old += config->cb.subbuffer_header_size();
-+ }
-+
-+ /*
-+ * Switch old subbuffer.
-+ */
-+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc);
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
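For illustration, a minimal sketch of a caller honouring the constraint above that an ACTIVE flush must run on the CPU that owns the buffer, assuming a per-CPU-allocated channel; the helper name example_flush_current_cpu is hypothetical and not part of this patch.

/* Illustrative only: flush the buffer owned by the current CPU. */
static void example_flush_current_cpu(struct channel *chan)
{
	struct lib_ring_buffer *buf;
	int cpu;

	cpu = get_cpu();	/* stay on this CPU: ACTIVE flush must run on the owner */
	buf = per_cpu_ptr(chan->backend.buf, cpu);
	lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
	put_cpu();
}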
-+
-+/*
-+ * Returns :
-+ * 0 if ok
-+ * -ENOSPC if event size is too large for packet.
-+ * -ENOBUFS if there is currently not enough space in buffer for the event.
-+ * -EIO if data cannot be written into the buffer for any other reason.
-+ */
-+static
-+int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
-+ struct channel *chan,
-+ struct switch_offsets *offsets,
-+ struct lib_ring_buffer_ctx *ctx)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long reserve_commit_diff;
-+
-+ offsets->begin = v_read(config, &buf->offset);
-+ offsets->old = offsets->begin;
-+ offsets->switch_new_start = 0;
-+ offsets->switch_new_end = 0;
-+ offsets->switch_old_end = 0;
-+ offsets->pre_header_padding = 0;
-+
-+ ctx->tsc = config->cb.ring_buffer_clock_read(chan);
-+ if ((int64_t) ctx->tsc == -EIO)
-+ return -EIO;
-+
-+ if (last_tsc_overflow(config, buf, ctx->tsc))
-+ ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
-+
-+ if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
-+ offsets->switch_new_start = 1; /* For offsets->begin */
-+ } else {
-+ offsets->size = config->cb.record_header_size(config, chan,
-+ offsets->begin,
-+ &offsets->pre_header_padding,
-+ ctx);
-+ offsets->size +=
-+ lib_ring_buffer_align(offsets->begin + offsets->size,
-+ ctx->largest_align)
-+ + ctx->data_size;
-+ if (unlikely(subbuf_offset(offsets->begin, chan) +
-+ offsets->size > chan->backend.subbuf_size)) {
-+ offsets->switch_old_end = 1; /* For offsets->old */
-+ offsets->switch_new_start = 1; /* For offsets->begin */
-+ }
-+ }
-+ if (unlikely(offsets->switch_new_start)) {
-+ unsigned long sb_index;
-+
-+ /*
-+ * We are typically not filling the previous buffer completely.
-+ */
-+ if (likely(offsets->switch_old_end))
-+ offsets->begin = subbuf_align(offsets->begin, chan);
-+ offsets->begin = offsets->begin
-+ + config->cb.subbuffer_header_size();
-+ /* Test new buffer integrity */
-+ sb_index = subbuf_index(offsets->begin, chan);
-+ reserve_commit_diff =
-+ (buf_trunc(offsets->begin, chan)
-+ >> chan->backend.num_subbuf_order)
-+ - ((unsigned long) v_read(config,
-+ &buf->commit_cold[sb_index].cc_sb)
-+ & chan->commit_count_mask);
-+ if (likely(reserve_commit_diff == 0)) {
-+ /* Next subbuffer not being written to. */
-+ if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
-+ subbuf_trunc(offsets->begin, chan)
-+ - subbuf_trunc((unsigned long)
-+ atomic_long_read(&buf->consumed), chan)
-+ >= chan->backend.buf_size)) {
-+ /*
-+				 * We do not overwrite non-consumed buffers
-+				 * and we are full: record is lost.
-+ */
-+ v_inc(config, &buf->records_lost_full);
-+ return -ENOBUFS;
-+ } else {
-+ /*
-+ * Next subbuffer not being written to, and we
-+ * are either in overwrite mode or the buffer is
-+ * not full. It's safe to write in this new
-+ * subbuffer.
-+ */
-+ }
-+ } else {
-+ /*
-+ * Next subbuffer reserve offset does not match the
-+ * commit offset. Drop record in producer-consumer and
-+ * overwrite mode. Caused by either a writer OOPS or too
-+ * many nested writes over a reserve/commit pair.
-+ */
-+ v_inc(config, &buf->records_lost_wrap);
-+ return -EIO;
-+ }
-+ offsets->size =
-+ config->cb.record_header_size(config, chan,
-+ offsets->begin,
-+ &offsets->pre_header_padding,
-+ ctx);
-+ offsets->size +=
-+ lib_ring_buffer_align(offsets->begin + offsets->size,
-+ ctx->largest_align)
-+ + ctx->data_size;
-+ if (unlikely(subbuf_offset(offsets->begin, chan)
-+ + offsets->size > chan->backend.subbuf_size)) {
-+ /*
-+ * Record too big for subbuffers, report error, don't
-+ * complete the sub-buffer switch.
-+ */
-+ v_inc(config, &buf->records_lost_big);
-+ return -ENOSPC;
-+ } else {
-+ /*
-+ * We just made a successful buffer switch and the
-+ * record fits in the new subbuffer. Let's write.
-+ */
-+ }
-+ } else {
-+ /*
-+ * Record fits in the current buffer and we are not on a switch
-+ * boundary. It's safe to write.
-+ */
-+ }
-+ offsets->end = offsets->begin + offsets->size;
-+
-+ if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
-+ /*
-+ * The offset_end will fall at the very beginning of the next
-+ * subbuffer.
-+ */
-+ offsets->switch_new_end = 1; /* For offsets->begin */
-+ }
-+ return 0;
-+}
-+
-+/**
-+ * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
-+ * @ctx: ring buffer context.
-+ *
-+ * Return: -ENOBUFS if not enough space, -ENOSPC if event size too large,
-+ * -EIO for other errors, else returns 0.
-+ * It will take care of sub-buffer switching.
-+ */
-+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
-+{
-+ struct channel *chan = ctx->chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ struct lib_ring_buffer *buf;
-+ struct switch_offsets offsets;
-+ int ret;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
-+ else
-+ buf = chan->backend.buf;
-+ ctx->buf = buf;
-+
-+ offsets.size = 0;
-+
-+ do {
-+ ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
-+ ctx);
-+ if (unlikely(ret))
-+ return ret;
-+ } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
-+ offsets.end)
-+ != offsets.old));
-+
-+ /*
-+ * Atomically update last_tsc. This update races against concurrent
-+ * atomic updates, but the race will always cause supplementary full TSC
-+ * records, never the opposite (missing a full TSC record when it would
-+ * be needed).
-+ */
-+ save_last_tsc(config, buf, ctx->tsc);
-+
-+ /*
-+ * Push the reader if necessary
-+ */
-+ lib_ring_buffer_reserve_push_reader(buf, chan, offsets.end - 1);
-+
-+ /*
-+ * Clear noref flag for this subbuffer.
-+ */
-+ lib_ring_buffer_clear_noref(config, &buf->backend,
-+ subbuf_index(offsets.end - 1, chan));
-+
-+ /*
-+ * Switch old subbuffer if needed.
-+ */
-+ if (unlikely(offsets.switch_old_end)) {
-+ lib_ring_buffer_clear_noref(config, &buf->backend,
-+ subbuf_index(offsets.old - 1, chan));
-+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
-+ }
-+
-+ /*
-+ * Populate new subbuffer.
-+ */
-+ if (unlikely(offsets.switch_new_start))
-+ lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
-+
-+ if (unlikely(offsets.switch_new_end))
-+ lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
-+
-+ ctx->slot_size = offsets.size;
-+ ctx->pre_offset = offsets.begin;
-+ ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
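A hedged sketch of a caller of this slow reservation path; the fast-path reserve and the commit helpers live in the frontend API headers, which are not part of this hunk, so only the context fields set above are used and the helper name is hypothetical.

/* Hypothetical caller: write path falling through to the slow reserve. */
static int example_reserve_slot(struct lib_ring_buffer_ctx *ctx)
{
	int ret;

	ret = lib_ring_buffer_reserve_slow(ctx);
	if (ret)
		return ret;	/* -ENOBUFS, -ENOSPC or -EIO, as documented above */
	/*
	 * On success, ctx->buf, ctx->buf_offset and ctx->slot_size describe
	 * the reserved slot: the caller writes its payload at ctx->buf_offset
	 * and then commits through the frontend API (not shown in this hunk).
	 */
	return 0;
}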
-+
-+int __init init_lib_ring_buffer_frontend(void)
-+{
-+ int cpu;
-+
-+ for_each_possible_cpu(cpu)
-+ spin_lock_init(&per_cpu(ring_buffer_nohz_lock, cpu));
-+ return 0;
-+}
-+
-+module_init(init_lib_ring_buffer_frontend);
-+
-+void __exit exit_lib_ring_buffer_frontend(void)
-+{
-+}
-+
-+module_exit(exit_lib_ring_buffer_frontend);
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c
-new file mode 100644
-index 0000000..1321b5f
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c
-@@ -0,0 +1,798 @@
-+/*
-+ * ring_buffer_iterator.c
-+ *
-+ * (C) Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring buffer and channel iterators. Get each event of a channel in order. Uses
-+ * a prio heap for per-cpu buffers, giving an O(log(NR_CPUS)) algorithmic
-+ * complexity for the "get next event" operation.
-+ *
-+ * Author:
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include "../../wrapper/ringbuffer/iterator.h"
-+#include <linux/jiffies.h>
-+#include <linux/delay.h>
-+#include <linux/module.h>
-+
-+/*
-+ * Safety factor taking into account internal kernel interrupt latency.
-+ * Assuming 250ms worst-case latency.
-+ */
-+#define MAX_SYSTEM_LATENCY 250
-+
-+/*
-+ * Maximum delta expected between trace clocks. At most 1 jiffy delta.
-+ */
-+#define MAX_CLOCK_DELTA (jiffies_to_usecs(1) * 1000)
-+
-+/**
-+ * lib_ring_buffer_get_next_record - Get the next record in a buffer.
-+ * @chan: channel
-+ * @buf: buffer
-+ *
-+ * Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
-+ * buffer is empty and finalized. The buffer must already be opened for reading.
-+ */
-+ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
-+ struct lib_ring_buffer *buf)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ struct lib_ring_buffer_iter *iter = &buf->iter;
-+ int ret;
-+
-+restart:
-+ switch (iter->state) {
-+ case ITER_GET_SUBBUF:
-+ ret = lib_ring_buffer_get_next_subbuf(buf);
-+ if (ret && !ACCESS_ONCE(buf->finalized)
-+ && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
-+ /*
-+ * Use "pull" scheme for global buffers. The reader
-+ * itself flushes the buffer to "pull" data not visible
-+ * to readers yet. Flush current subbuffer and re-try.
-+ *
-+ * Per-CPU buffers rather use a "push" scheme because
-+ * the IPI needed to flush all CPU's buffers is too
-+ * costly. In the "push" scheme, the reader waits for
-+ * the writer periodic deferrable timer to flush the
-+ * buffers (keeping track of a quiescent state
-+ * timestamp). Therefore, the writer "pushes" data out
-+ * of the buffers rather than letting the reader "pull"
-+ * data from the buffer.
-+ */
-+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-+ ret = lib_ring_buffer_get_next_subbuf(buf);
-+ }
-+ if (ret)
-+ return ret;
-+ iter->consumed = buf->cons_snapshot;
-+ iter->data_size = lib_ring_buffer_get_read_data_size(config, buf);
-+ iter->read_offset = iter->consumed;
-+ /* skip header */
-+ iter->read_offset += config->cb.subbuffer_header_size();
-+ iter->state = ITER_TEST_RECORD;
-+ goto restart;
-+ case ITER_TEST_RECORD:
-+ if (iter->read_offset - iter->consumed >= iter->data_size) {
-+ iter->state = ITER_PUT_SUBBUF;
-+ } else {
-+ CHAN_WARN_ON(chan, !config->cb.record_get);
-+ config->cb.record_get(config, chan, buf,
-+ iter->read_offset,
-+ &iter->header_len,
-+ &iter->payload_len,
-+ &iter->timestamp);
-+ iter->read_offset += iter->header_len;
-+ subbuffer_consume_record(config, &buf->backend);
-+ iter->state = ITER_NEXT_RECORD;
-+ return iter->payload_len;
-+ }
-+ goto restart;
-+ case ITER_NEXT_RECORD:
-+ iter->read_offset += iter->payload_len;
-+ iter->state = ITER_TEST_RECORD;
-+ goto restart;
-+ case ITER_PUT_SUBBUF:
-+ lib_ring_buffer_put_next_subbuf(buf);
-+ iter->state = ITER_GET_SUBBUF;
-+ goto restart;
-+ default:
-+ CHAN_WARN_ON(chan, 1); /* Should not happen */
-+ return -EPERM;
-+ }
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
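A minimal sketch of a reader draining a single buffer with the return conventions documented above (payload length, -EAGAIN, -ENODATA); the function name is hypothetical and the buffer is assumed to already be open for reading.

/* Illustrative reader loop over one buffer. */
static void example_drain_buffer(struct channel *chan, struct lib_ring_buffer *buf)
{
	ssize_t len;

	for (;;) {
		len = lib_ring_buffer_get_next_record(chan, buf);
		if (len == -EAGAIN)
			break;	/* temporarily empty */
		if (len == -ENODATA)
			break;	/* empty and finalized: end of stream */
		if (len < 0)
			break;	/* unexpected error */
		/*
		 * The payload of size 'len' starts at buf->iter.read_offset;
		 * a real consumer would copy it out here.
		 */
	}
}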
-+
-+static int buf_is_higher(void *a, void *b)
-+{
-+ struct lib_ring_buffer *bufa = a;
-+ struct lib_ring_buffer *bufb = b;
-+
-+ /* Consider lowest timestamps to be at the top of the heap */
-+ return (bufa->iter.timestamp < bufb->iter.timestamp);
-+}
-+
-+static
-+void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
-+ struct channel *chan)
-+{
-+ struct lttng_ptr_heap *heap = &chan->iter.heap;
-+ struct lib_ring_buffer *buf, *tmp;
-+ ssize_t len;
-+
-+ list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
-+ iter.empty_node) {
-+ len = lib_ring_buffer_get_next_record(chan, buf);
-+
-+ /*
-+ * Deal with -EAGAIN and -ENODATA.
-+ * len >= 0 means record contains data.
-+ * -EBUSY should never happen, because we support only one
-+ * reader.
-+ */
-+ switch (len) {
-+ case -EAGAIN:
-+ /* Keep node in empty list */
-+ break;
-+ case -ENODATA:
-+ /*
-+			 * Buffer is finalized. Don't add it to the list of empty
-+			 * buffers, because it has no more data to provide, ever.
-+ */
-+ list_del(&buf->iter.empty_node);
-+ break;
-+ case -EBUSY:
-+ CHAN_WARN_ON(chan, 1);
-+ break;
-+ default:
-+ /*
-+ * Insert buffer into the heap, remove from empty buffer
-+ * list.
-+ */
-+ CHAN_WARN_ON(chan, len < 0);
-+ list_del(&buf->iter.empty_node);
-+ CHAN_WARN_ON(chan, lttng_heap_insert(heap, buf));
-+ }
-+ }
-+}
-+
-+static
-+void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
-+ struct channel *chan)
-+{
-+ u64 timestamp_qs;
-+ unsigned long wait_msecs;
-+
-+ /*
-+ * No need to wait if no empty buffers are present.
-+ */
-+ if (list_empty(&chan->iter.empty_head))
-+ return;
-+
-+ timestamp_qs = config->cb.ring_buffer_clock_read(chan);
-+ /*
-+ * We need to consider previously empty buffers.
-+ * Do a get next buf record on each of them. Add them to
-+ * the heap if they have data. If at least one of them
-+ * don't have data, we need to wait for
-+	 * doesn't have data, we need to wait for
-+ * buffers have been switched either by the timer or idle entry) and
-+ * check them again, adding them if they have data.
-+ */
-+ lib_ring_buffer_get_empty_buf_records(config, chan);
-+
-+ /*
-+ * No need to wait if no empty buffers are present.
-+ */
-+ if (list_empty(&chan->iter.empty_head))
-+ return;
-+
-+ /*
-+ * We need to wait for the buffer switch timer to run. If the
-+ * CPU is idle, idle entry performed the switch.
-+ * TODO: we could optimize further by skipping the sleep if all
-+ * empty buffers belong to idle or offline cpus.
-+ */
-+ wait_msecs = jiffies_to_msecs(chan->switch_timer_interval);
-+ wait_msecs += MAX_SYSTEM_LATENCY;
-+ msleep(wait_msecs);
-+ lib_ring_buffer_get_empty_buf_records(config, chan);
-+ /*
-+ * Any buffer still in the empty list here cannot possibly
-+ * contain an event with a timestamp prior to "timestamp_qs".
-+ * The new quiescent state timestamp is the one we grabbed
-+ * before waiting for buffer data. It is therefore safe to
-+ * ignore empty buffers up to last_qs timestamp for fusion
-+ * merge.
-+ */
-+ chan->iter.last_qs = timestamp_qs;
-+}
-+
-+/**
-+ * channel_get_next_record - Get the next record in a channel.
-+ * @chan: channel
-+ * @ret_buf: the buffer in which the event is located (output)
-+ *
-+ * Returns the size of new current event, -EAGAIN if all buffers are empty,
-+ * -ENODATA if all buffers are empty and finalized. The channel must already be
-+ * opened for reading.
-+ */
-+
-+ssize_t channel_get_next_record(struct channel *chan,
-+ struct lib_ring_buffer **ret_buf)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ struct lib_ring_buffer *buf;
-+ struct lttng_ptr_heap *heap;
-+ ssize_t len;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
-+ *ret_buf = channel_get_ring_buffer(config, chan, 0);
-+ return lib_ring_buffer_get_next_record(chan, *ret_buf);
-+ }
-+
-+ heap = &chan->iter.heap;
-+
-+ /*
-+ * get next record for topmost buffer.
-+ */
-+ buf = lttng_heap_maximum(heap);
-+ if (buf) {
-+ len = lib_ring_buffer_get_next_record(chan, buf);
-+ /*
-+ * Deal with -EAGAIN and -ENODATA.
-+ * len >= 0 means record contains data.
-+ */
-+ switch (len) {
-+ case -EAGAIN:
-+ buf->iter.timestamp = 0;
-+ list_add(&buf->iter.empty_node, &chan->iter.empty_head);
-+ /* Remove topmost buffer from the heap */
-+ CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
-+ break;
-+ case -ENODATA:
-+ /*
-+ * Buffer is finalized. Remove buffer from heap and
-+			 * don't add it to the list of empty buffers, because it has no
-+ * more data to provide, ever.
-+ */
-+ CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
-+ break;
-+ case -EBUSY:
-+ CHAN_WARN_ON(chan, 1);
-+ break;
-+ default:
-+ /*
-+ * Reinsert buffer into the heap. Note that heap can be
-+ * partially empty, so we need to use
-+ * lttng_heap_replace_max().
-+ */
-+ CHAN_WARN_ON(chan, len < 0);
-+ CHAN_WARN_ON(chan, lttng_heap_replace_max(heap, buf) != buf);
-+ break;
-+ }
-+ }
-+
-+ buf = lttng_heap_maximum(heap);
-+ if (!buf || buf->iter.timestamp > chan->iter.last_qs) {
-+ /*
-+ * Deal with buffers previously showing no data.
-+ * Add buffers containing data to the heap, update
-+ * last_qs.
-+ */
-+ lib_ring_buffer_wait_for_qs(config, chan);
-+ }
-+
-+ *ret_buf = buf = lttng_heap_maximum(heap);
-+ if (buf) {
-+ /*
-+ * If this warning triggers, you probably need to check your
-+		 * system interrupt latency. Typical causes: too much printk()
-+ * output going to a serial console with interrupts off.
-+ * Allow for MAX_CLOCK_DELTA ns timestamp delta going backward.
-+ * Observed on SMP KVM setups with trace_clock().
-+ */
-+ if (chan->iter.last_timestamp
-+ > (buf->iter.timestamp + MAX_CLOCK_DELTA)) {
-+ printk(KERN_WARNING "ring_buffer: timestamps going "
-+ "backward. Last time %llu ns, cpu %d, "
-+ "current time %llu ns, cpu %d, "
-+ "delta %llu ns.\n",
-+ chan->iter.last_timestamp, chan->iter.last_cpu,
-+ buf->iter.timestamp, buf->backend.cpu,
-+ chan->iter.last_timestamp - buf->iter.timestamp);
-+ CHAN_WARN_ON(chan, 1);
-+ }
-+ chan->iter.last_timestamp = buf->iter.timestamp;
-+ chan->iter.last_cpu = buf->backend.cpu;
-+ return buf->iter.payload_len;
-+ } else {
-+ /* Heap is empty */
-+ if (list_empty(&chan->iter.empty_head))
-+ return -ENODATA; /* All buffers finalized */
-+ else
-+ return -EAGAIN; /* Temporarily empty */
-+ }
-+}
-+EXPORT_SYMBOL_GPL(channel_get_next_record);
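Likewise, a hedged sketch of a channel-level consumer relying on the fusion merge above to return records in timestamp order across per-CPU buffers; the helper name is hypothetical, and a real reader would sleep on chan->read_wait instead of giving up on -EAGAIN.

/* Illustrative channel-level reader. */
static void example_drain_channel(struct channel *chan)
{
	struct lib_ring_buffer *buf;
	ssize_t len;

	for (;;) {
		len = channel_get_next_record(chan, &buf);
		if (len == -ENODATA)
			break;	/* every buffer is empty and finalized */
		if (len < 0)
			break;	/* -EAGAIN: all buffers temporarily empty */
		/*
		 * 'buf' now holds the record with the lowest timestamp;
		 * its payload of size 'len' starts at buf->iter.read_offset.
		 */
	}
}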
-+
-+static
-+void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
-+{
-+ if (buf->iter.allocated)
-+ return;
-+
-+ buf->iter.allocated = 1;
-+ if (chan->iter.read_open && !buf->iter.read_open) {
-+ CHAN_WARN_ON(chan, lib_ring_buffer_open_read(buf) != 0);
-+ buf->iter.read_open = 1;
-+ }
-+
-+ /* Add to list of buffers without any current record */
-+ if (chan->backend.config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ list_add(&buf->iter.empty_node, &chan->iter.empty_head);
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static
-+int __cpuinit channel_iterator_cpu_hotplug(struct notifier_block *nb,
-+ unsigned long action,
-+ void *hcpu)
-+{
-+ unsigned int cpu = (unsigned long)hcpu;
-+ struct channel *chan = container_of(nb, struct channel,
-+ hp_iter_notifier);
-+ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ if (!chan->hp_iter_enable)
-+ return NOTIFY_DONE;
-+
-+ CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-+
-+ switch (action) {
-+ case CPU_DOWN_FAILED:
-+ case CPU_DOWN_FAILED_FROZEN:
-+ case CPU_ONLINE:
-+ case CPU_ONLINE_FROZEN:
-+ lib_ring_buffer_iterator_init(chan, buf);
-+ return NOTIFY_OK;
-+ default:
-+ return NOTIFY_DONE;
-+ }
-+}
-+#endif
-+
-+int channel_iterator_init(struct channel *chan)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ struct lib_ring_buffer *buf;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ int cpu, ret;
-+
-+ INIT_LIST_HEAD(&chan->iter.empty_head);
-+ ret = lttng_heap_init(&chan->iter.heap,
-+ num_possible_cpus(),
-+ GFP_KERNEL, buf_is_higher);
-+ if (ret)
-+ return ret;
-+ /*
-+ * In case of non-hotplug cpu, if the ring-buffer is allocated
-+ * in early initcall, it will not be notified of secondary cpus.
-+		 * In that case, we need to allocate for all possible cpus.
-+ */
-+#ifdef CONFIG_HOTPLUG_CPU
-+ chan->hp_iter_notifier.notifier_call =
-+ channel_iterator_cpu_hotplug;
-+ chan->hp_iter_notifier.priority = 10;
-+ register_cpu_notifier(&chan->hp_iter_notifier);
-+ get_online_cpus();
-+ for_each_online_cpu(cpu) {
-+ buf = per_cpu_ptr(chan->backend.buf, cpu);
-+ lib_ring_buffer_iterator_init(chan, buf);
-+ }
-+ chan->hp_iter_enable = 1;
-+ put_online_cpus();
-+#else
-+ for_each_possible_cpu(cpu) {
-+ buf = per_cpu_ptr(chan->backend.buf, cpu);
-+ lib_ring_buffer_iterator_init(chan, buf);
-+ }
-+#endif
-+ } else {
-+ buf = channel_get_ring_buffer(config, chan, 0);
-+ lib_ring_buffer_iterator_init(chan, buf);
-+ }
-+ return 0;
-+}
-+
-+void channel_iterator_unregister_notifiers(struct channel *chan)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ chan->hp_iter_enable = 0;
-+ unregister_cpu_notifier(&chan->hp_iter_notifier);
-+ }
-+}
-+
-+void channel_iterator_free(struct channel *chan)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ lttng_heap_free(&chan->iter.heap);
-+}
-+
-+int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
-+ return lib_ring_buffer_open_read(buf);
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_open);
-+
-+/*
-+ * Note: Iterators must not be mixed with other types of outputs, because an
-+ * iterator can leave the buffer in "GET" state, which is not consistent with
-+ * other types of output (mmap, splice, raw data read).
-+ */
-+void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
-+{
-+ lib_ring_buffer_release_read(buf);
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
-+
-+int channel_iterator_open(struct channel *chan)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ struct lib_ring_buffer *buf;
-+ int ret = 0, cpu;
-+
-+ CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ get_online_cpus();
-+ /* Allow CPU hotplug to keep track of opened reader */
-+ chan->iter.read_open = 1;
-+ for_each_channel_cpu(cpu, chan) {
-+ buf = channel_get_ring_buffer(config, chan, cpu);
-+ ret = lib_ring_buffer_iterator_open(buf);
-+ if (ret)
-+ goto error;
-+ buf->iter.read_open = 1;
-+ }
-+ put_online_cpus();
-+ } else {
-+ buf = channel_get_ring_buffer(config, chan, 0);
-+ ret = lib_ring_buffer_iterator_open(buf);
-+ }
-+ return ret;
-+error:
-+ /* Error should always happen on CPU 0, hence no close is required. */
-+ CHAN_WARN_ON(chan, cpu != 0);
-+ put_online_cpus();
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(channel_iterator_open);
-+
-+void channel_iterator_release(struct channel *chan)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ struct lib_ring_buffer *buf;
-+ int cpu;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-+ get_online_cpus();
-+ for_each_channel_cpu(cpu, chan) {
-+ buf = channel_get_ring_buffer(config, chan, cpu);
-+ if (buf->iter.read_open) {
-+ lib_ring_buffer_iterator_release(buf);
-+ buf->iter.read_open = 0;
-+ }
-+ }
-+ chan->iter.read_open = 0;
-+ put_online_cpus();
-+ } else {
-+ buf = channel_get_ring_buffer(config, chan, 0);
-+ lib_ring_buffer_iterator_release(buf);
-+ }
-+}
-+EXPORT_SYMBOL_GPL(channel_iterator_release);
-+
-+void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
-+{
-+ struct channel *chan = buf->backend.chan;
-+
-+ if (buf->iter.state != ITER_GET_SUBBUF)
-+ lib_ring_buffer_put_next_subbuf(buf);
-+ buf->iter.state = ITER_GET_SUBBUF;
-+ /* Remove from heap (if present). */
-+ if (lttng_heap_cherrypick(&chan->iter.heap, buf))
-+ list_add(&buf->iter.empty_node, &chan->iter.empty_head);
-+ buf->iter.timestamp = 0;
-+ buf->iter.header_len = 0;
-+ buf->iter.payload_len = 0;
-+ buf->iter.consumed = 0;
-+ buf->iter.read_offset = 0;
-+ buf->iter.data_size = 0;
-+ /* Don't reset allocated and read_open */
-+}
-+
-+void channel_iterator_reset(struct channel *chan)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ struct lib_ring_buffer *buf;
-+ int cpu;
-+
-+ /* Empty heap, put into empty_head */
-+ while ((buf = lttng_heap_remove(&chan->iter.heap)) != NULL)
-+ list_add(&buf->iter.empty_node, &chan->iter.empty_head);
-+
-+ for_each_channel_cpu(cpu, chan) {
-+ buf = channel_get_ring_buffer(config, chan, cpu);
-+ lib_ring_buffer_iterator_reset(buf);
-+ }
-+ /* Don't reset read_open */
-+ chan->iter.last_qs = 0;
-+ chan->iter.last_timestamp = 0;
-+ chan->iter.last_cpu = 0;
-+ chan->iter.len_left = 0;
-+}
-+
-+/*
-+ * Ring buffer payload extraction read() implementation.
-+ */
-+static
-+ssize_t channel_ring_buffer_file_read(struct file *filp,
-+ char __user *user_buf,
-+ size_t count,
-+ loff_t *ppos,
-+ struct channel *chan,
-+ struct lib_ring_buffer *buf,
-+ int fusionmerge)
-+{
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ size_t read_count = 0, read_offset;
-+ ssize_t len;
-+
-+ might_sleep();
-+ if (!access_ok(VERIFY_WRITE, user_buf, count))
-+ return -EFAULT;
-+
-+ /* Finish copy of previous record */
-+ if (*ppos != 0) {
-+ if (read_count < count) {
-+ len = chan->iter.len_left;
-+ read_offset = *ppos;
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU
-+ && fusionmerge)
-+ buf = lttng_heap_maximum(&chan->iter.heap);
-+ CHAN_WARN_ON(chan, !buf);
-+ goto skip_get_next;
-+ }
-+ }
-+
-+ while (read_count < count) {
-+ size_t copy_len, space_left;
-+
-+ if (fusionmerge)
-+ len = channel_get_next_record(chan, &buf);
-+ else
-+ len = lib_ring_buffer_get_next_record(chan, buf);
-+len_test:
-+ if (len < 0) {
-+ /*
-+ * Check if buffer is finalized (end of file).
-+ */
-+ if (len == -ENODATA) {
-+ /* A 0 read_count will tell about end of file */
-+ goto nodata;
-+ }
-+ if (filp->f_flags & O_NONBLOCK) {
-+ if (!read_count)
-+ read_count = -EAGAIN;
-+ goto nodata;
-+ } else {
-+ int error;
-+
-+ /*
-+ * No data available at the moment, return what
-+ * we got.
-+ */
-+ if (read_count)
-+ goto nodata;
-+
-+ /*
-+ * Wait for returned len to be >= 0 or -ENODATA.
-+ */
-+ if (fusionmerge)
-+ error = wait_event_interruptible(
-+ chan->read_wait,
-+ ((len = channel_get_next_record(chan,
-+ &buf)), len != -EAGAIN));
-+ else
-+ error = wait_event_interruptible(
-+ buf->read_wait,
-+ ((len = lib_ring_buffer_get_next_record(
-+ chan, buf)), len != -EAGAIN));
-+ CHAN_WARN_ON(chan, len == -EBUSY);
-+ if (error) {
-+ read_count = error;
-+ goto nodata;
-+ }
-+ CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
-+ goto len_test;
-+ }
-+ }
-+ read_offset = buf->iter.read_offset;
-+skip_get_next:
-+ space_left = count - read_count;
-+ if (len <= space_left) {
-+ copy_len = len;
-+ chan->iter.len_left = 0;
-+ *ppos = 0;
-+ } else {
-+ copy_len = space_left;
-+ chan->iter.len_left = len - copy_len;
-+ *ppos = read_offset + copy_len;
-+ }
-+ if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
-+ &user_buf[read_count],
-+ copy_len)) {
-+ /*
-+ * Leave the len_left and ppos values at their current
-+ * state, as we currently have a valid event to read.
-+ */
-+ return -EFAULT;
-+ }
-+ read_count += copy_len;
-+ };
-+ return read_count;
-+
-+nodata:
-+ *ppos = 0;
-+ chan->iter.len_left = 0;
-+ return read_count;
-+}
-+
-+/**
-+ * lib_ring_buffer_file_read - Read buffer record payload.
-+ * @filp: file structure pointer.
-+ * @buffer: user buffer to read data into.
-+ * @count: number of bytes to read.
-+ * @ppos: file read position.
-+ *
-+ * Returns a negative value on error, or the number of bytes read on success.
-+ * ppos is used to save the position _within the current record_ between calls
-+ * to read().
-+ */
-+static
-+ssize_t lib_ring_buffer_file_read(struct file *filp,
-+ char __user *user_buf,
-+ size_t count,
-+ loff_t *ppos)
-+{
-+ struct inode *inode = filp->f_dentry->d_inode;
-+ struct lib_ring_buffer *buf = inode->i_private;
-+ struct channel *chan = buf->backend.chan;
-+
-+ return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
-+ chan, buf, 0);
-+}
-+
-+/**
-+ * channel_file_read - Read channel record payload.
-+ * @filp: file structure pointer.
-+ * @buffer: user buffer to read data into.
-+ * @count: number of bytes to read.
-+ * @ppos: file read position.
-+ *
-+ * Returns a negative value on error, or the number of bytes read on success.
-+ * ppos is used to save the position _within the current record_ between calls
-+ * to read().
-+ */
-+static
-+ssize_t channel_file_read(struct file *filp,
-+ char __user *user_buf,
-+ size_t count,
-+ loff_t *ppos)
-+{
-+ struct inode *inode = filp->f_dentry->d_inode;
-+ struct channel *chan = inode->i_private;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ return channel_ring_buffer_file_read(filp, user_buf, count,
-+ ppos, chan, NULL, 1);
-+ else {
-+ struct lib_ring_buffer *buf =
-+ channel_get_ring_buffer(config, chan, 0);
-+ return channel_ring_buffer_file_read(filp, user_buf, count,
-+ ppos, chan, buf, 0);
-+ }
-+}
-+
-+static
-+int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
-+{
-+ struct lib_ring_buffer *buf = inode->i_private;
-+ int ret;
-+
-+ ret = lib_ring_buffer_iterator_open(buf);
-+ if (ret)
-+ return ret;
-+
-+ file->private_data = buf;
-+ ret = nonseekable_open(inode, file);
-+ if (ret)
-+ goto release_iter;
-+ return 0;
-+
-+release_iter:
-+ lib_ring_buffer_iterator_release(buf);
-+ return ret;
-+}
-+
-+static
-+int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
-+{
-+ struct lib_ring_buffer *buf = inode->i_private;
-+
-+ lib_ring_buffer_iterator_release(buf);
-+ return 0;
-+}
-+
-+static
-+int channel_file_open(struct inode *inode, struct file *file)
-+{
-+ struct channel *chan = inode->i_private;
-+ int ret;
-+
-+ ret = channel_iterator_open(chan);
-+ if (ret)
-+ return ret;
-+
-+ file->private_data = chan;
-+ ret = nonseekable_open(inode, file);
-+ if (ret)
-+ goto release_iter;
-+ return 0;
-+
-+release_iter:
-+ channel_iterator_release(chan);
-+ return ret;
-+}
-+
-+static
-+int channel_file_release(struct inode *inode, struct file *file)
-+{
-+ struct channel *chan = inode->i_private;
-+
-+ channel_iterator_release(chan);
-+ return 0;
-+}
-+
-+const struct file_operations channel_payload_file_operations = {
-+ .owner = THIS_MODULE,
-+ .open = channel_file_open,
-+ .release = channel_file_release,
-+ .read = channel_file_read,
-+ .llseek = lib_ring_buffer_no_llseek,
-+};
-+EXPORT_SYMBOL_GPL(channel_payload_file_operations);
-+
-+const struct file_operations lib_ring_buffer_payload_file_operations = {
-+ .owner = THIS_MODULE,
-+ .open = lib_ring_buffer_file_open,
-+ .release = lib_ring_buffer_file_release,
-+ .read = lib_ring_buffer_file_read,
-+ .llseek = lib_ring_buffer_no_llseek,
-+};
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);
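For illustration, one way these payload file operations could be exposed; the LTTng ABI code elsewhere in this patch series creates the real files, so the debugfs location, the 0400 mode and the name "payload" here are assumptions only.

#include <linux/debugfs.h>

/* Illustrative: expose one buffer's payload as a read-only debugfs file. */
static struct dentry *example_create_payload_file(struct dentry *parent,
						  struct lib_ring_buffer *buf)
{
	/* debugfs stores 'buf' in inode->i_private, which the read path expects. */
	return debugfs_create_file("payload", 0400, parent, buf,
				   &lib_ring_buffer_payload_file_operations);
}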
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-new file mode 100644
-index 0000000..68221ee
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-@@ -0,0 +1,115 @@
-+/*
-+ * ring_buffer_mmap.c
-+ *
-+ * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
-+ * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
-+ * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Re-using content from kernel/relay.c.
-+ *
-+ * This file is released under the GPL v2.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/mm.h>
-+
-+#include "../../wrapper/ringbuffer/backend.h"
-+#include "../../wrapper/ringbuffer/frontend.h"
-+#include "../../wrapper/ringbuffer/vfs.h"
-+
-+/*
-+ * fault() vm_op implementation for ring buffer file mapping.
-+ */
-+static int lib_ring_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-+{
-+ struct lib_ring_buffer *buf = vma->vm_private_data;
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ pgoff_t pgoff = vmf->pgoff;
-+ struct page **page;
-+ void **virt;
-+ unsigned long offset, sb_bindex;
-+
-+ if (!buf)
-+ return VM_FAULT_OOM;
-+
-+ /*
-+ * Verify that faults are only done on the range of pages owned by the
-+ * reader.
-+ */
-+ offset = pgoff << PAGE_SHIFT;
-+ sb_bindex = subbuffer_id_get_index(config, buf->backend.buf_rsb.id);
-+ if (!(offset >= buf->backend.array[sb_bindex]->mmap_offset
-+ && offset < buf->backend.array[sb_bindex]->mmap_offset +
-+ buf->backend.chan->backend.subbuf_size))
-+ return VM_FAULT_SIGBUS;
-+ /*
-+	 * lib_ring_buffer_read_get_page() returns the page from the current
-+	 * reader's pages.
-+ */
-+ page = lib_ring_buffer_read_get_page(&buf->backend, offset, &virt);
-+ if (!*page)
-+ return VM_FAULT_SIGBUS;
-+ get_page(*page);
-+ vmf->page = *page;
-+
-+ return 0;
-+}
-+
-+/*
-+ * vm_ops for ring buffer file mappings.
-+ */
-+static const struct vm_operations_struct lib_ring_buffer_mmap_ops = {
-+ .fault = lib_ring_buffer_fault,
-+};
-+
-+/**
-+ * lib_ring_buffer_mmap_buf: - mmap channel buffer to process address space
-+ * @buf: ring buffer to map
-+ * @vma: vm_area_struct describing memory to be mapped
-+ *
-+ * Returns 0 if ok, negative on error
-+ *
-+ * Caller should already have grabbed mmap_sem.
-+ */
-+static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer *buf,
-+ struct vm_area_struct *vma)
-+{
-+ unsigned long length = vma->vm_end - vma->vm_start;
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned long mmap_buf_len;
-+
-+ if (config->output != RING_BUFFER_MMAP)
-+ return -EINVAL;
-+
-+ if (!buf)
-+ return -EBADF;
-+
-+ mmap_buf_len = chan->backend.buf_size;
-+ if (chan->backend.extra_reader_sb)
-+ mmap_buf_len += chan->backend.subbuf_size;
-+
-+ if (length != mmap_buf_len)
-+ return -EINVAL;
-+
-+ vma->vm_ops = &lib_ring_buffer_mmap_ops;
-+ vma->vm_flags |= VM_DONTEXPAND;
-+ vma->vm_private_data = buf;
-+
-+ return 0;
-+}
-+
-+/**
-+ * lib_ring_buffer_mmap - mmap file op
-+ * @filp: the file
-+ * @vma: the vma describing what to map
-+ *
-+ * Calls upon lib_ring_buffer_mmap_buf() to map the file into user space.
-+ */
-+int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma)
-+{
-+ struct lib_ring_buffer *buf = filp->private_data;
-+ return lib_ring_buffer_mmap_buf(buf, vma);
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_mmap);
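From user space, mapping the buffer combines the RING_BUFFER_GET_MMAP_LEN and RING_BUFFER_GET_MMAP_READ_OFFSET ioctls defined in vfs.h below with mmap(2). A hedged sketch, assuming the RING_BUFFER_* ioctl definitions are visible to user space and that 'fd' is an already-open buffer file descriptor (the path to obtain it is not part of this hunk).

/* User-space sketch, mmap output mode only. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int example_map_buffer(int fd)
{
	unsigned long len, off;
	void *base;

	if (ioctl(fd, RING_BUFFER_GET_MMAP_LEN, &len) < 0)
		return -1;
	base = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (base == MAP_FAILED)
		return -1;
	if (ioctl(fd, RING_BUFFER_GET_MMAP_READ_OFFSET, &off) < 0)
		return -1;
	/* Sub-buffer currently owned by the reader. */
	printf("reader sub-buffer at %p\n", (char *)base + off);
	return 0;
}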
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c
-new file mode 100644
-index 0000000..ded18ba
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c
-@@ -0,0 +1,202 @@
-+/*
-+ * ring_buffer_splice.c
-+ *
-+ * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
-+ * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
-+ * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Re-using content from kernel/relay.c.
-+ *
-+ * This file is released under the GPL v2.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+
-+#include "../../wrapper/splice.h"
-+#include "../../wrapper/ringbuffer/backend.h"
-+#include "../../wrapper/ringbuffer/frontend.h"
-+#include "../../wrapper/ringbuffer/vfs.h"
-+
-+#if 0
-+#define printk_dbg(fmt, args...) printk(fmt, args)
-+#else
-+#define printk_dbg(fmt, args...)
-+#endif
-+
-+loff_t lib_ring_buffer_no_llseek(struct file *file, loff_t offset, int origin)
-+{
-+ return -ESPIPE;
-+}
-+
-+/*
-+ * Release pages from the buffer so splice pipe_to_file can move them.
-+ * Called after the pipe has been populated with buffer pages.
-+ */
-+static void lib_ring_buffer_pipe_buf_release(struct pipe_inode_info *pipe,
-+ struct pipe_buffer *pbuf)
-+{
-+ __free_page(pbuf->page);
-+}
-+
-+static const struct pipe_buf_operations ring_buffer_pipe_buf_ops = {
-+ .can_merge = 0,
-+ .map = generic_pipe_buf_map,
-+ .unmap = generic_pipe_buf_unmap,
-+ .confirm = generic_pipe_buf_confirm,
-+ .release = lib_ring_buffer_pipe_buf_release,
-+ .steal = generic_pipe_buf_steal,
-+ .get = generic_pipe_buf_get,
-+};
-+
-+/*
-+ * Page release operation after splice pipe_to_file ends.
-+ */
-+static void lib_ring_buffer_page_release(struct splice_pipe_desc *spd,
-+ unsigned int i)
-+{
-+ __free_page(spd->pages[i]);
-+}
-+
-+/*
-+ * subbuf_splice_actor - splice up to one subbuf's worth of data
-+ */
-+static int subbuf_splice_actor(struct file *in,
-+ loff_t *ppos,
-+ struct pipe_inode_info *pipe,
-+ size_t len,
-+ unsigned int flags)
-+{
-+ struct lib_ring_buffer *buf = in->private_data;
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ unsigned int poff, subbuf_pages, nr_pages;
-+ struct page *pages[PIPE_DEF_BUFFERS];
-+ struct partial_page partial[PIPE_DEF_BUFFERS];
-+ struct splice_pipe_desc spd = {
-+ .pages = pages,
-+ .nr_pages = 0,
-+ .partial = partial,
-+ .flags = flags,
-+ .ops = &ring_buffer_pipe_buf_ops,
-+ .spd_release = lib_ring_buffer_page_release,
-+ };
-+ unsigned long consumed_old, roffset;
-+ unsigned long bytes_avail;
-+
-+ /*
-+ * Check that a GET_SUBBUF ioctl has been done before.
-+ */
-+ WARN_ON(atomic_long_read(&buf->active_readers) != 1);
-+ consumed_old = lib_ring_buffer_get_consumed(config, buf);
-+ consumed_old += *ppos;
-+
-+ /*
-+	 * Adjust read len if it is longer than what is available.
-+	 * Max read size is 1 sub-buffer, because get_subbuf/put_subbuf
-+	 * protect only one sub-buffer at a time.
-+ */
-+ bytes_avail = chan->backend.subbuf_size;
-+ WARN_ON(bytes_avail > chan->backend.buf_size);
-+ len = min_t(size_t, len, bytes_avail);
-+ subbuf_pages = bytes_avail >> PAGE_SHIFT;
-+ nr_pages = min_t(unsigned int, subbuf_pages, PIPE_DEF_BUFFERS);
-+ roffset = consumed_old & PAGE_MASK;
-+ poff = consumed_old & ~PAGE_MASK;
-+ printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
-+ len, (ssize_t)*ppos, lib_ring_buffer_get_offset(config, buf));
-+
-+ for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
-+ unsigned int this_len;
-+ struct page **page, *new_page;
-+ void **virt;
-+
-+ if (!len)
-+ break;
-+ printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
-+ len, roffset);
-+
-+ /*
-+ * We have to replace the page we are moving into the splice
-+ * pipe.
-+ */
-+ new_page = alloc_pages_node(cpu_to_node(max(buf->backend.cpu,
-+ 0)),
-+ GFP_KERNEL | __GFP_ZERO, 0);
-+ if (!new_page)
-+ break;
-+
-+ this_len = PAGE_SIZE - poff;
-+ page = lib_ring_buffer_read_get_page(&buf->backend, roffset, &virt);
-+ spd.pages[spd.nr_pages] = *page;
-+ *page = new_page;
-+ *virt = page_address(new_page);
-+ spd.partial[spd.nr_pages].offset = poff;
-+ spd.partial[spd.nr_pages].len = this_len;
-+
-+ poff = 0;
-+ roffset += PAGE_SIZE;
-+ len -= this_len;
-+ }
-+
-+ if (!spd.nr_pages)
-+ return 0;
-+
-+ return wrapper_splice_to_pipe(pipe, &spd);
-+}
-+
-+ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
-+ struct pipe_inode_info *pipe, size_t len,
-+ unsigned int flags)
-+{
-+ struct lib_ring_buffer *buf = in->private_data;
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ ssize_t spliced;
-+ int ret;
-+
-+ if (config->output != RING_BUFFER_SPLICE)
-+ return -EINVAL;
-+
-+ /*
-+ * We require ppos and length to be page-aligned for performance reasons
-+ * (no page copy). Size is known using the ioctl
-+ * RING_BUFFER_GET_PADDED_SUBBUF_SIZE, which is page-size padded.
-+	 * We fail when the ppos or len passed is not page-aligned, because splice
-+ * is not allowed to copy more than the length passed as parameter (so
-+ * the ABI does not let us silently copy more than requested to include
-+ * padding).
-+ */
-+ if (*ppos != PAGE_ALIGN(*ppos) || len != PAGE_ALIGN(len))
-+ return -EINVAL;
-+
-+ ret = 0;
-+ spliced = 0;
-+
-+ printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n", len,
-+ (ssize_t)*ppos);
-+ while (len && !spliced) {
-+ ret = subbuf_splice_actor(in, ppos, pipe, len, flags);
-+ printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
-+ if (ret < 0)
-+ break;
-+ else if (!ret) {
-+ if (flags & SPLICE_F_NONBLOCK)
-+ ret = -EAGAIN;
-+ break;
-+ }
-+
-+ *ppos += ret;
-+ if (ret > len)
-+ len = 0;
-+ else
-+ len -= ret;
-+ spliced += ret;
-+ }
-+
-+ if (spliced)
-+ return spliced;
-+
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read);
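A user-space counterpart, splicing one padded sub-buffer to a file; as required above, the offset and length stay page-aligned by using RING_BUFFER_GET_PADDED_SUBBUF_SIZE. The descriptor names are hypothetical, and _GNU_SOURCE is assumed so that splice(2) and loff_t are declared.

/* User-space sketch: move one padded sub-buffer to out_fd without copying. */
#include <fcntl.h>
#include <sys/ioctl.h>

int example_splice_subbuf(int buf_fd, int out_fd)
{
	unsigned long padded;
	loff_t ppos = 0;

	if (ioctl(buf_fd, RING_BUFFER_GET_NEXT_SUBBUF) < 0)
		return -1;
	if (ioctl(buf_fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE, &padded) < 0)
		goto put;
	while (padded > 0) {
		ssize_t ret = splice(buf_fd, &ppos, out_fd, NULL, padded,
				     SPLICE_F_MOVE);
		if (ret <= 0)
			break;
		padded -= ret;
	}
put:
	return ioctl(buf_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
}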
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
-new file mode 100644
-index 0000000..1708ffd
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
-@@ -0,0 +1,387 @@
-+/*
-+ * ring_buffer_vfs.c
-+ *
-+ * Copyright (C) 2009-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Ring Buffer VFS file operations.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/fs.h>
-+#include <linux/compat.h>
-+
-+#include "../../wrapper/ringbuffer/backend.h"
-+#include "../../wrapper/ringbuffer/frontend.h"
-+#include "../../wrapper/ringbuffer/vfs.h"
-+#include "../../wrapper/poll.h"
-+
-+static int put_ulong(unsigned long val, unsigned long arg)
-+{
-+ return put_user(val, (unsigned long __user *)arg);
-+}
-+
-+#ifdef CONFIG_COMPAT
-+static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
-+{
-+ return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
-+}
-+#endif
-+
-+/**
-+ * lib_ring_buffer_open - ring buffer open file operation
-+ * @inode: opened inode
-+ * @file: opened file
-+ *
-+ * Open implementation. Makes sure only one open instance of a buffer
-+ * exists at a given moment.
-+ */
-+int lib_ring_buffer_open(struct inode *inode, struct file *file)
-+{
-+ struct lib_ring_buffer *buf = inode->i_private;
-+ int ret;
-+
-+ ret = lib_ring_buffer_open_read(buf);
-+ if (ret)
-+ return ret;
-+
-+ file->private_data = buf;
-+ ret = nonseekable_open(inode, file);
-+ if (ret)
-+ goto release_read;
-+ return 0;
-+
-+release_read:
-+ lib_ring_buffer_release_read(buf);
-+ return ret;
-+}
-+
-+/**
-+ * lib_ring_buffer_release - ring buffer release file operation
-+ * @inode: opened inode
-+ * @file: opened file
-+ *
-+ * Release implementation.
-+ */
-+int lib_ring_buffer_release(struct inode *inode, struct file *file)
-+{
-+ struct lib_ring_buffer *buf = file->private_data;
-+
-+ lib_ring_buffer_release_read(buf);
-+
-+ return 0;
-+}
-+
-+/**
-+ * lib_ring_buffer_poll - ring buffer poll file operation
-+ * @filp: the file
-+ * @wait: poll table
-+ *
-+ * Poll implementation.
-+ */
-+unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait)
-+{
-+ unsigned int mask = 0;
-+ struct lib_ring_buffer *buf = filp->private_data;
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+ int finalized, disabled;
-+
-+ if (filp->f_mode & FMODE_READ) {
-+ poll_wait_set_exclusive(wait);
-+ poll_wait(filp, &buf->read_wait, wait);
-+
-+ finalized = lib_ring_buffer_is_finalized(config, buf);
-+ disabled = lib_ring_buffer_channel_is_disabled(chan);
-+
-+ /*
-+ * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
-+ * finalized load before offsets loads.
-+ */
-+ WARN_ON(atomic_long_read(&buf->active_readers) != 1);
-+retry:
-+ if (disabled)
-+ return POLLERR;
-+
-+ if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
-+ - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
-+ == 0) {
-+ if (finalized)
-+ return POLLHUP;
-+ else {
-+ /*
-+ * The memory barriers
-+ * __wait_event()/wake_up_interruptible() take
-+ * care of "raw_spin_is_locked" memory ordering.
-+ */
-+ if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
-+ goto retry;
-+ else
-+ return 0;
-+ }
-+ } else {
-+ if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
-+ chan)
-+ - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
-+ chan)
-+ >= chan->backend.buf_size)
-+ return POLLPRI | POLLRDBAND;
-+ else
-+ return POLLIN | POLLRDNORM;
-+ }
-+ }
-+ return mask;
-+}
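From user space, the poll semantics above map onto a simple wait; a hedged sketch with a hypothetical buffer file descriptor.

/* User-space sketch: block until the buffer has something to report. */
#include <poll.h>

int example_wait_for_data(int buf_fd)
{
	struct pollfd pfd = { .fd = buf_fd, .events = POLLIN | POLLPRI };

	if (poll(&pfd, 1, -1) < 0)
		return -1;
	if (pfd.revents & POLLPRI)
		return 2;	/* buffer full: consume now to avoid losing data */
	if (pfd.revents & POLLIN)
		return 1;	/* at least one sub-buffer is readable */
	if (pfd.revents & POLLHUP)
		return 0;	/* finalized and empty */
	return -1;		/* POLLERR: channel disabled */
}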
-+
-+/**
-+ * lib_ring_buffer_ioctl - control ring buffer reader synchronization
-+ *
-+ * @filp: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements commands necessary for producer/consumer
-+ * and flight recorder reader interaction :
-+ * RING_BUFFER_GET_NEXT_SUBBUF
-+ * Get the next sub-buffer that can be read. It never blocks.
-+ * RING_BUFFER_PUT_NEXT_SUBBUF
-+ * Release the currently read sub-buffer.
-+ * RING_BUFFER_GET_SUBBUF_SIZE
-+ * returns the size of the current sub-buffer.
-+ * RING_BUFFER_GET_MAX_SUBBUF_SIZE
-+ * returns the maximum size for sub-buffers.
-+ * RING_BUFFER_GET_NUM_SUBBUF
-+ * returns the number of reader-visible sub-buffers in the per cpu
-+ * channel (for mmap).
-+ * RING_BUFFER_GET_MMAP_READ_OFFSET
-+ * returns the offset of the subbuffer belonging to the reader.
-+ * Should only be used for mmap clients.
-+ */
-+long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-+{
-+ struct lib_ring_buffer *buf = filp->private_data;
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ if (lib_ring_buffer_channel_is_disabled(chan))
-+ return -EIO;
-+
-+ switch (cmd) {
-+ case RING_BUFFER_SNAPSHOT:
-+ return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
-+ &buf->prod_snapshot);
-+ case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
-+ return put_ulong(buf->cons_snapshot, arg);
-+ case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
-+ return put_ulong(buf->prod_snapshot, arg);
-+ case RING_BUFFER_GET_SUBBUF:
-+ {
-+ unsigned long uconsume;
-+ long ret;
-+
-+ ret = get_user(uconsume, (unsigned long __user *) arg);
-+ if (ret)
-+ return ret; /* will return -EFAULT */
-+ ret = lib_ring_buffer_get_subbuf(buf, uconsume);
-+ if (!ret) {
-+ /* Set file position to zero at each successful "get" */
-+ filp->f_pos = 0;
-+ }
-+ return ret;
-+ }
-+ case RING_BUFFER_PUT_SUBBUF:
-+ lib_ring_buffer_put_subbuf(buf);
-+ return 0;
-+
-+ case RING_BUFFER_GET_NEXT_SUBBUF:
-+ {
-+ long ret;
-+
-+ ret = lib_ring_buffer_get_next_subbuf(buf);
-+ if (!ret) {
-+ /* Set file position to zero at each successful "get" */
-+ filp->f_pos = 0;
-+ }
-+ return ret;
-+ }
-+ case RING_BUFFER_PUT_NEXT_SUBBUF:
-+ lib_ring_buffer_put_next_subbuf(buf);
-+ return 0;
-+ case RING_BUFFER_GET_SUBBUF_SIZE:
-+ return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
-+ arg);
-+ case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
-+ {
-+ unsigned long size;
-+
-+ size = lib_ring_buffer_get_read_data_size(config, buf);
-+ size = PAGE_ALIGN(size);
-+ return put_ulong(size, arg);
-+ }
-+ case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
-+ return put_ulong(chan->backend.subbuf_size, arg);
-+ case RING_BUFFER_GET_MMAP_LEN:
-+ {
-+ unsigned long mmap_buf_len;
-+
-+ if (config->output != RING_BUFFER_MMAP)
-+ return -EINVAL;
-+ mmap_buf_len = chan->backend.buf_size;
-+ if (chan->backend.extra_reader_sb)
-+ mmap_buf_len += chan->backend.subbuf_size;
-+ if (mmap_buf_len > INT_MAX)
-+ return -EFBIG;
-+ return put_ulong(mmap_buf_len, arg);
-+ }
-+ case RING_BUFFER_GET_MMAP_READ_OFFSET:
-+ {
-+ unsigned long sb_bindex;
-+
-+ if (config->output != RING_BUFFER_MMAP)
-+ return -EINVAL;
-+ sb_bindex = subbuffer_id_get_index(config,
-+ buf->backend.buf_rsb.id);
-+ return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
-+ arg);
-+ }
-+ case RING_BUFFER_FLUSH:
-+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-+ return 0;
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
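For illustration, the snapshot commands handled above can be combined into a flight-recorder style read loop from user space; a hedged sketch with a hypothetical descriptor, assuming the sub-buffer contents themselves are read through mmap or splice as sketched earlier.

/* User-space sketch: iterate a snapshot of the buffer without consuming it. */
#include <sys/ioctl.h>

int example_read_snapshot(int buf_fd)
{
	unsigned long consumed, produced, pos, padded;

	if (ioctl(buf_fd, RING_BUFFER_SNAPSHOT) < 0 ||
	    ioctl(buf_fd, RING_BUFFER_SNAPSHOT_GET_CONSUMED, &consumed) < 0 ||
	    ioctl(buf_fd, RING_BUFFER_SNAPSHOT_GET_PRODUCED, &produced) < 0)
		return -1;
	for (pos = consumed; (long)(produced - pos) > 0; pos += padded) {
		if (ioctl(buf_fd, RING_BUFFER_GET_SUBBUF, &pos) < 0)
			break;	/* sub-buffer overwritten in flight-recorder mode */
		if (ioctl(buf_fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE, &padded) < 0) {
			ioctl(buf_fd, RING_BUFFER_PUT_SUBBUF);
			break;
		}
		/* ... read the sub-buffer contents via mmap or splice ... */
		ioctl(buf_fd, RING_BUFFER_PUT_SUBBUF);
	}
	return 0;
}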
-+
-+#ifdef CONFIG_COMPAT
-+long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
-+ unsigned long arg)
-+{
-+ struct lib_ring_buffer *buf = filp->private_data;
-+ struct channel *chan = buf->backend.chan;
-+ const struct lib_ring_buffer_config *config = chan->backend.config;
-+
-+ if (lib_ring_buffer_channel_is_disabled(chan))
-+ return -EIO;
-+
-+ switch (cmd) {
-+ case RING_BUFFER_SNAPSHOT:
-+ return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
-+ &buf->prod_snapshot);
-+ case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
-+ return compat_put_ulong(buf->cons_snapshot, arg);
-+ case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
-+ return compat_put_ulong(buf->prod_snapshot, arg);
-+ case RING_BUFFER_GET_SUBBUF:
-+ {
-+ __u32 uconsume;
-+ unsigned long consume;
-+ long ret;
-+
-+ ret = get_user(uconsume, (__u32 __user *) arg);
-+ if (ret)
-+ return ret; /* will return -EFAULT */
-+ consume = buf->cons_snapshot;
-+ consume &= ~0xFFFFFFFFL;
-+ consume |= uconsume;
-+ ret = lib_ring_buffer_get_subbuf(buf, consume);
-+ if (!ret) {
-+ /* Set file position to zero at each successful "get" */
-+ filp->f_pos = 0;
-+ }
-+ return ret;
-+ }
-+ case RING_BUFFER_PUT_SUBBUF:
-+ lib_ring_buffer_put_subbuf(buf);
-+ return 0;
-+
-+ case RING_BUFFER_GET_NEXT_SUBBUF:
-+ {
-+ long ret;
-+
-+ ret = lib_ring_buffer_get_next_subbuf(buf);
-+ if (!ret) {
-+ /* Set file position to zero at each successful "get" */
-+ filp->f_pos = 0;
-+ }
-+ return ret;
-+ }
-+ case RING_BUFFER_PUT_NEXT_SUBBUF:
-+ lib_ring_buffer_put_next_subbuf(buf);
-+ return 0;
-+ case RING_BUFFER_GET_SUBBUF_SIZE:
-+ {
-+ unsigned long data_size;
-+
-+ data_size = lib_ring_buffer_get_read_data_size(config, buf);
-+ if (data_size > UINT_MAX)
-+ return -EFBIG;
-+ return put_ulong(data_size, arg);
-+ }
-+ case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
-+ {
-+ unsigned long size;
-+
-+ size = lib_ring_buffer_get_read_data_size(config, buf);
-+ size = PAGE_ALIGN(size);
-+ if (size > UINT_MAX)
-+ return -EFBIG;
-+ return put_ulong(size, arg);
-+ }
-+ case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
-+ if (chan->backend.subbuf_size > UINT_MAX)
-+ return -EFBIG;
-+ return put_ulong(chan->backend.subbuf_size, arg);
-+ case RING_BUFFER_GET_MMAP_LEN:
-+ {
-+ unsigned long mmap_buf_len;
-+
-+ if (config->output != RING_BUFFER_MMAP)
-+ return -EINVAL;
-+ mmap_buf_len = chan->backend.buf_size;
-+ if (chan->backend.extra_reader_sb)
-+ mmap_buf_len += chan->backend.subbuf_size;
-+ if (mmap_buf_len > UINT_MAX)
-+ return -EFBIG;
-+ return put_ulong(mmap_buf_len, arg);
-+ }
-+ case RING_BUFFER_GET_MMAP_READ_OFFSET:
-+ {
-+ unsigned long sb_bindex, read_offset;
-+
-+ if (config->output != RING_BUFFER_MMAP)
-+ return -EINVAL;
-+ sb_bindex = subbuffer_id_get_index(config,
-+ buf->backend.buf_rsb.id);
-+ read_offset = buf->backend.array[sb_bindex]->mmap_offset;
-+ if (read_offset > UINT_MAX)
-+ return -EINVAL;
-+ return put_ulong(read_offset, arg);
-+ }
-+ case RING_BUFFER_FLUSH:
-+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-+ return 0;
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+#endif
-+
-+const struct file_operations lib_ring_buffer_file_operations = {
-+ .owner = THIS_MODULE,
-+ .open = lib_ring_buffer_open,
-+ .release = lib_ring_buffer_release,
-+ .poll = lib_ring_buffer_poll,
-+ .splice_read = lib_ring_buffer_splice_read,
-+ .mmap = lib_ring_buffer_mmap,
-+ .unlocked_ioctl = lib_ring_buffer_ioctl,
-+ .llseek = lib_ring_buffer_no_llseek,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lib_ring_buffer_compat_ioctl,
-+#endif
-+};
-+EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Ring Buffer Library VFS");
-diff --git a/drivers/staging/lttng/lib/ringbuffer/vatomic.h b/drivers/staging/lttng/lib/ringbuffer/vatomic.h
-new file mode 100644
-index 0000000..b944dd6
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/vatomic.h
-@@ -0,0 +1,85 @@
-+#ifndef _LINUX_RING_BUFFER_VATOMIC_H
-+#define _LINUX_RING_BUFFER_VATOMIC_H
-+
-+/*
-+ * linux/ringbuffer/vatomic.h
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <asm/atomic.h>
-+#include <asm/local.h>
-+
-+/*
-+ * Same data type (long) accessed differently depending on configuration.
-+ * v field is for non-atomic access (protected by mutual exclusion).
-+ * In the fast-path, the ring_buffer_config structure is constant, so the
-+ * compiler can statically select the appropriate branch.
-+ * local_t is used for per-cpu and per-thread buffers.
-+ * atomic_long_t is used for globally shared buffers.
-+ */
-+union v_atomic {
-+ local_t l;
-+ atomic_long_t a;
-+ long v;
-+};
-+
-+static inline
-+long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
-+{
-+ if (config->sync == RING_BUFFER_SYNC_PER_CPU)
-+ return local_read(&v_a->l);
-+ else
-+ return atomic_long_read(&v_a->a);
-+}
-+
-+static inline
-+void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
-+ long v)
-+{
-+ if (config->sync == RING_BUFFER_SYNC_PER_CPU)
-+ local_set(&v_a->l, v);
-+ else
-+ atomic_long_set(&v_a->a, v);
-+}
-+
-+static inline
-+void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
-+{
-+ if (config->sync == RING_BUFFER_SYNC_PER_CPU)
-+ local_add(v, &v_a->l);
-+ else
-+ atomic_long_add(v, &v_a->a);
-+}
-+
-+static inline
-+void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
-+{
-+ if (config->sync == RING_BUFFER_SYNC_PER_CPU)
-+ local_inc(&v_a->l);
-+ else
-+ atomic_long_inc(&v_a->a);
-+}
-+
-+/*
-+ * Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
-+ */
-+static inline
-+void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
-+{
-+ --v_a->v;
-+}
-+
-+static inline
-+long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
-+ long old, long _new)
-+{
-+ if (config->sync == RING_BUFFER_SYNC_PER_CPU)
-+ return local_cmpxchg(&v_a->l, old, _new);
-+ else
-+ return atomic_long_cmpxchg(&v_a->a, old, _new);
-+}
-+
-+#endif /* _LINUX_RING_BUFFER_VATOMIC_H */
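A small illustrative use of these helpers, mirroring how the frontend code above counts records: the same union is backed by local_t or atomic_long_t depending on the compile-time-constant config. The helper name is hypothetical.

/* Illustrative only: bump a counter with the config-selected primitive. */
static long example_count_event(const struct lib_ring_buffer_config *config,
				union v_atomic *counter)
{
	/* RING_BUFFER_SYNC_PER_CPU -> local_inc(), otherwise atomic_long_inc(). */
	v_inc(config, counter);
	return v_read(config, counter);
}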
-diff --git a/drivers/staging/lttng/lib/ringbuffer/vfs.h b/drivers/staging/lttng/lib/ringbuffer/vfs.h
-new file mode 100644
-index 0000000..d073e4c
---- /dev/null
-+++ b/drivers/staging/lttng/lib/ringbuffer/vfs.h
-@@ -0,0 +1,89 @@
-+#ifndef _LINUX_RING_BUFFER_VFS_H
-+#define _LINUX_RING_BUFFER_VFS_H
-+
-+/*
-+ * linux/ringbuffer/vfs.h
-+ *
-+ * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Wait-free ring buffer VFS file operations.
-+ *
-+ * Author:
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/fs.h>
-+#include <linux/poll.h>
-+
-+/* VFS API */
-+
-+extern const struct file_operations lib_ring_buffer_file_operations;
-+
-+/*
-+ * Internal file operations.
-+ */
-+
-+int lib_ring_buffer_open(struct inode *inode, struct file *file);
-+int lib_ring_buffer_release(struct inode *inode, struct file *file);
-+unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait);
-+ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
-+ struct pipe_inode_info *pipe, size_t len,
-+ unsigned int flags);
-+int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma);
-+
-+/* Ring Buffer ioctl() and ioctl numbers */
-+long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-+#ifdef CONFIG_COMPAT
-+long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
-+ unsigned long arg);
-+#endif
-+
-+/*
-+ * Use RING_BUFFER_GET_NEXT_SUBBUF / RING_BUFFER_PUT_NEXT_SUBBUF to read and
-+ * consume sub-buffers sequentially.
-+ *
-+ * Reading sub-buffers without consuming them can be performed with:
-+ *
-+ * RING_BUFFER_SNAPSHOT
-+ * RING_BUFFER_SNAPSHOT_GET_CONSUMED
-+ * RING_BUFFER_SNAPSHOT_GET_PRODUCED
-+ *
-+ * to get the offset range to consume, and then by passing each sub-buffer
-+ * offset to RING_BUFFER_GET_SUBBUF, read the sub-buffer, and then release it
-+ * with RING_BUFFER_PUT_SUBBUF.
-+ *
-+ * Note that the "snapshot" API can be used to read the sub-buffer in reverse
-+ * order, which is useful for flight recorder snapshots.
-+ */
-+
-+/* Get a snapshot of the current ring buffer producer and consumer positions */
-+#define RING_BUFFER_SNAPSHOT _IO(0xF6, 0x00)
-+/* Get the consumer position (iteration start) */
-+#define RING_BUFFER_SNAPSHOT_GET_CONSUMED _IOR(0xF6, 0x01, unsigned long)
-+/* Get the producer position (iteration end) */
-+#define RING_BUFFER_SNAPSHOT_GET_PRODUCED _IOR(0xF6, 0x02, unsigned long)
-+/* Get exclusive read access to the specified sub-buffer position */
-+#define RING_BUFFER_GET_SUBBUF _IOW(0xF6, 0x03, unsigned long)
-+/* Release exclusive sub-buffer access */
-+#define RING_BUFFER_PUT_SUBBUF _IO(0xF6, 0x04)
-+
-+/* Get exclusive read access to the next sub-buffer that can be read. */
-+#define RING_BUFFER_GET_NEXT_SUBBUF _IO(0xF6, 0x05)
-+/* Release exclusive sub-buffer access, move consumer forward. */
-+#define RING_BUFFER_PUT_NEXT_SUBBUF _IO(0xF6, 0x06)
-+/* returns the size of the current sub-buffer, without padding (for mmap). */
-+#define RING_BUFFER_GET_SUBBUF_SIZE _IOR(0xF6, 0x07, unsigned long)
-+/* returns the size of the current sub-buffer, with padding (for splice). */
-+#define RING_BUFFER_GET_PADDED_SUBBUF_SIZE _IOR(0xF6, 0x08, unsigned long)
-+/* returns the maximum size for sub-buffers. */
-+#define RING_BUFFER_GET_MAX_SUBBUF_SIZE _IOR(0xF6, 0x09, unsigned long)
-+/* returns the length to mmap. */
-+#define RING_BUFFER_GET_MMAP_LEN _IOR(0xF6, 0x0A, unsigned long)
-+/* returns the offset of the subbuffer belonging to the mmap reader. */
-+#define RING_BUFFER_GET_MMAP_READ_OFFSET _IOR(0xF6, 0x0B, unsigned long)
-+/* flush the current sub-buffer */
-+#define RING_BUFFER_FLUSH _IO(0xF6, 0x0C)
-+
-+#endif /* _LINUX_RING_BUFFER_VFS_H */
---
-1.7.9
-
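The ioctl protocol defined in vfs.h above is driven entirely from userspace. Below is a hypothetical sketch of an mmap-based consumer loop using the sequential GET_NEXT_SUBBUF / PUT_NEXT_SUBBUF scheme; the function name, file descriptors and error handling are assumptions, only the ioctl numbers come from the header.

#include <linux/ioctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define RING_BUFFER_GET_NEXT_SUBBUF             _IO(0xF6, 0x05)
#define RING_BUFFER_PUT_NEXT_SUBBUF             _IO(0xF6, 0x06)
#define RING_BUFFER_GET_SUBBUF_SIZE             _IOR(0xF6, 0x07, unsigned long)
#define RING_BUFFER_GET_MMAP_LEN                _IOR(0xF6, 0x0A, unsigned long)
#define RING_BUFFER_GET_MMAP_READ_OFFSET        _IOR(0xF6, 0x0B, unsigned long)

/* Drain every currently readable sub-buffer of stream buf_fd into out_fd. */
static int drain_stream(int buf_fd, int out_fd)
{
        unsigned long map_len, off, len;
        char *base;

        if (ioctl(buf_fd, RING_BUFFER_GET_MMAP_LEN, &map_len) < 0)
                return -1;
        base = mmap(NULL, map_len, PROT_READ, MAP_PRIVATE, buf_fd, 0);
        if (base == MAP_FAILED)
                return -1;

        /* Claim, copy out, then release each sub-buffer in order. */
        while (ioctl(buf_fd, RING_BUFFER_GET_NEXT_SUBBUF) == 0) {
                if (ioctl(buf_fd, RING_BUFFER_GET_MMAP_READ_OFFSET, &off) < 0 ||
                    ioctl(buf_fd, RING_BUFFER_GET_SUBBUF_SIZE, &len) < 0)
                        break;
                if (write(out_fd, base + off, len) != (ssize_t) len)
                        break;
                /* Releases the sub-buffer and moves the consumer forward. */
                if (ioctl(buf_fd, RING_BUFFER_PUT_NEXT_SUBBUF) < 0)
                        break;
        }
        munmap(base, map_len);
        return 0;
}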
diff --git a/patches.lttng/0002-lttng-lib-portable-bitfield-read-write-header.patch b/patches.lttng/0002-lttng-lib-portable-bitfield-read-write-header.patch
deleted file mode 100644
index 1bca3f78fe6..00000000000
--- a/patches.lttng/0002-lttng-lib-portable-bitfield-read-write-header.patch
+++ /dev/null
@@ -1,421 +0,0 @@
-From 7430010ec58a189e2ef81d504417299779f47663 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:10 -0500
-Subject: lttng lib: portable bitfield read/write header
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/lib/bitfield.h | 400 ++++++++++++++++++++++++++++++++++
- 1 files changed, 400 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/lib/bitfield.h
-
-diff --git a/drivers/staging/lttng/lib/bitfield.h b/drivers/staging/lttng/lib/bitfield.h
-new file mode 100644
-index 0000000..861e6dc
---- /dev/null
-+++ b/drivers/staging/lttng/lib/bitfield.h
-@@ -0,0 +1,400 @@
-+#ifndef _BABELTRACE_BITFIELD_H
-+#define _BABELTRACE_BITFIELD_H
-+
-+/*
-+ * BabelTrace
-+ *
-+ * Bitfields read/write functions.
-+ *
-+ * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this software and associated documentation files (the "Software"), to deal
-+ * in the Software without restriction, including without limitation the rights
-+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+ * copies of the Software, and to permit persons to whom the Software is
-+ * furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ */
-+
-+#include "../ltt-endian.h"
-+
-+#ifndef CHAR_BIT
-+#define CHAR_BIT 8
-+#endif
-+
-+/* We can't shift an int by 32 bits: >> 32 and << 32 on a 32-bit int are undefined */
-+#define _bt_piecewise_rshift(_v, _shift) \
-+({ \
-+ typeof(_v) ___v = (_v); \
-+ typeof(_shift) ___shift = (_shift); \
-+ unsigned long sb = (___shift) / (sizeof(___v) * CHAR_BIT - 1); \
-+ unsigned long final = (___shift) % (sizeof(___v) * CHAR_BIT - 1); \
-+ \
-+ for (; sb; sb--) \
-+ ___v >>= sizeof(___v) * CHAR_BIT - 1; \
-+ ___v >>= final; \
-+})
-+
-+#define _bt_piecewise_lshift(_v, _shift) \
-+({ \
-+ typeof(_v) ___v = (_v); \
-+ typeof(_shift) ___shift = (_shift); \
-+ unsigned long sb = (___shift) / (sizeof(___v) * CHAR_BIT - 1); \
-+ unsigned long final = (___shift) % (sizeof(___v) * CHAR_BIT - 1); \
-+ \
-+ for (; sb; sb--) \
-+ ___v <<= sizeof(___v) * CHAR_BIT - 1; \
-+ ___v <<= final; \
-+})
-+
-+#define _bt_is_signed_type(type) (((type)(-1)) < 0)
-+
-+#define _bt_unsigned_cast(type, v) \
-+({ \
-+ (sizeof(v) < sizeof(type)) ? \
-+ ((type) (v)) & (~(~(type) 0 << (sizeof(v) * CHAR_BIT))) : \
-+ (type) (v); \
-+})
-+
-+/*
-+ * bt_bitfield_write - write integer to a bitfield in native endianness
-+ *
-+ * Save an integer into the bitfield, which starts at the "start" bit and is
-+ * "len" bits long.
-+ * The inside of a bitfield is from high bits to low bits.
-+ * Uses native endianness.
-+ * For unsigned "v", pad MSB with 0 if bitfield is larger than v.
-+ * For signed "v", sign-extend v if bitfield is larger than v.
-+ *
-+ * On little endian, bytes are placed from the least significant to the most
-+ * significant. Also, consecutive bitfields are placed from lower bits to higher
-+ * bits.
-+ *
-+ * On big endian, bytes are placed from the most significant to the least
-+ * significant. Also, consecutive bitfields are placed from higher to lower bits.
-+ */
-+
-+#define _bt_bitfield_write_le(_ptr, type, _start, _length, _v) \
-+do { \
-+ typeof(_v) __v = (_v); \
-+ type *__ptr = (void *) (_ptr); \
-+ unsigned long __start = (_start), __length = (_length); \
-+ type mask, cmask; \
-+ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
-+ unsigned long start_unit, end_unit, this_unit; \
-+ unsigned long end, cshift; /* cshift is "complement shift" */ \
-+ \
-+ if (!__length) \
-+ break; \
-+ \
-+ end = __start + __length; \
-+ start_unit = __start / ts; \
-+ end_unit = (end + (ts - 1)) / ts; \
-+ \
-+ /* Trim v high bits */ \
-+ if (__length < sizeof(__v) * CHAR_BIT) \
-+ __v &= ~((~(typeof(__v)) 0) << __length); \
-+ \
-+ /* We can now append v with a simple "or", shift it piece-wise */ \
-+ this_unit = start_unit; \
-+ if (start_unit == end_unit - 1) { \
-+ mask = ~((~(type) 0) << (__start % ts)); \
-+ if (end % ts) \
-+ mask |= (~(type) 0) << (end % ts); \
-+ cmask = (type) __v << (__start % ts); \
-+ cmask &= ~mask; \
-+ __ptr[this_unit] &= mask; \
-+ __ptr[this_unit] |= cmask; \
-+ break; \
-+ } \
-+ if (__start % ts) { \
-+ cshift = __start % ts; \
-+ mask = ~((~(type) 0) << cshift); \
-+ cmask = (type) __v << cshift; \
-+ cmask &= ~mask; \
-+ __ptr[this_unit] &= mask; \
-+ __ptr[this_unit] |= cmask; \
-+ __v = _bt_piecewise_rshift(__v, ts - cshift); \
-+ __start += ts - cshift; \
-+ this_unit++; \
-+ } \
-+ for (; this_unit < end_unit - 1; this_unit++) { \
-+ __ptr[this_unit] = (type) __v; \
-+ __v = _bt_piecewise_rshift(__v, ts); \
-+ __start += ts; \
-+ } \
-+ if (end % ts) { \
-+ mask = (~(type) 0) << (end % ts); \
-+ cmask = (type) __v; \
-+ cmask &= ~mask; \
-+ __ptr[this_unit] &= mask; \
-+ __ptr[this_unit] |= cmask; \
-+ } else \
-+ __ptr[this_unit] = (type) __v; \
-+} while (0)
-+
-+#define _bt_bitfield_write_be(_ptr, type, _start, _length, _v) \
-+do { \
-+ typeof(_v) __v = (_v); \
-+ type *__ptr = (void *) (_ptr); \
-+ unsigned long __start = (_start), __length = (_length); \
-+ type mask, cmask; \
-+ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
-+ unsigned long start_unit, end_unit, this_unit; \
-+ unsigned long end, cshift; /* cshift is "complement shift" */ \
-+ \
-+ if (!__length) \
-+ break; \
-+ \
-+ end = __start + __length; \
-+ start_unit = __start / ts; \
-+ end_unit = (end + (ts - 1)) / ts; \
-+ \
-+ /* Trim v high bits */ \
-+ if (__length < sizeof(__v) * CHAR_BIT) \
-+ __v &= ~((~(typeof(__v)) 0) << __length); \
-+ \
-+ /* We can now append v with a simple "or", shift it piece-wise */ \
-+ this_unit = end_unit - 1; \
-+ if (start_unit == end_unit - 1) { \
-+ mask = ~((~(type) 0) << ((ts - (end % ts)) % ts)); \
-+ if (__start % ts) \
-+ mask |= (~((type) 0)) << (ts - (__start % ts)); \
-+ cmask = (type) __v << ((ts - (end % ts)) % ts); \
-+ cmask &= ~mask; \
-+ __ptr[this_unit] &= mask; \
-+ __ptr[this_unit] |= cmask; \
-+ break; \
-+ } \
-+ if (end % ts) { \
-+ cshift = end % ts; \
-+ mask = ~((~(type) 0) << (ts - cshift)); \
-+ cmask = (type) __v << (ts - cshift); \
-+ cmask &= ~mask; \
-+ __ptr[this_unit] &= mask; \
-+ __ptr[this_unit] |= cmask; \
-+ __v = _bt_piecewise_rshift(__v, cshift); \
-+ end -= cshift; \
-+ this_unit--; \
-+ } \
-+ for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
-+ __ptr[this_unit] = (type) __v; \
-+ __v = _bt_piecewise_rshift(__v, ts); \
-+ end -= ts; \
-+ } \
-+ if (__start % ts) { \
-+ mask = (~(type) 0) << (ts - (__start % ts)); \
-+ cmask = (type) __v; \
-+ cmask &= ~mask; \
-+ __ptr[this_unit] &= mask; \
-+ __ptr[this_unit] |= cmask; \
-+ } else \
-+ __ptr[this_unit] = (type) __v; \
-+} while (0)
-+
-+/*
-+ * bt_bitfield_write - write integer to a bitfield in native endianness
-+ * bt_bitfield_write_le - write integer to a bitfield in little endian
-+ * bt_bitfield_write_be - write integer to a bitfield in big endian
-+ */
-+
-+#if (__BYTE_ORDER == __LITTLE_ENDIAN)
-+
-+#define bt_bitfield_write(ptr, type, _start, _length, _v) \
-+ _bt_bitfield_write_le(ptr, type, _start, _length, _v)
-+
-+#define bt_bitfield_write_le(ptr, type, _start, _length, _v) \
-+ _bt_bitfield_write_le(ptr, type, _start, _length, _v)
-+
-+#define bt_bitfield_write_be(ptr, type, _start, _length, _v) \
-+ _bt_bitfield_write_be(ptr, unsigned char, _start, _length, _v)
-+
-+#elif (__BYTE_ORDER == __BIG_ENDIAN)
-+
-+#define bt_bitfield_write(ptr, type, _start, _length, _v) \
-+ _bt_bitfield_write_be(ptr, type, _start, _length, _v)
-+
-+#define bt_bitfield_write_le(ptr, type, _start, _length, _v) \
-+ _bt_bitfield_write_le(ptr, unsigned char, _start, _length, _v)
-+
-+#define bt_bitfield_write_be(ptr, type, _start, _length, _v) \
-+ _bt_bitfield_write_be(ptr, type, _start, _length, _v)
-+
-+#else /* (__BYTE_ORDER == __PDP_ENDIAN) */
-+
-+#error "Byte order not supported"
-+
-+#endif
-+
-+#define _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
-+do { \
-+ typeof(*(_vptr)) *__vptr = (_vptr); \
-+ typeof(*__vptr) __v; \
-+ type *__ptr = (void *) (_ptr); \
-+ unsigned long __start = (_start), __length = (_length); \
-+ type mask, cmask; \
-+ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
-+ unsigned long start_unit, end_unit, this_unit; \
-+ unsigned long end, cshift; /* cshift is "complement shift" */ \
-+ \
-+ if (!__length) { \
-+ *__vptr = 0; \
-+ break; \
-+ } \
-+ \
-+ end = __start + __length; \
-+ start_unit = __start / ts; \
-+ end_unit = (end + (ts - 1)) / ts; \
-+ \
-+ this_unit = end_unit - 1; \
-+ if (_bt_is_signed_type(typeof(__v)) \
-+ && (__ptr[this_unit] & ((type) 1 << ((end % ts ? : ts) - 1)))) \
-+ __v = ~(typeof(__v)) 0; \
-+ else \
-+ __v = 0; \
-+ if (start_unit == end_unit - 1) { \
-+ cmask = __ptr[this_unit]; \
-+ cmask >>= (__start % ts); \
-+ if ((end - __start) % ts) { \
-+ mask = ~((~(type) 0) << (end - __start)); \
-+ cmask &= mask; \
-+ } \
-+ __v = _bt_piecewise_lshift(__v, end - __start); \
-+ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
-+ *__vptr = __v; \
-+ break; \
-+ } \
-+ if (end % ts) { \
-+ cshift = end % ts; \
-+ mask = ~((~(type) 0) << cshift); \
-+ cmask = __ptr[this_unit]; \
-+ cmask &= mask; \
-+ __v = _bt_piecewise_lshift(__v, cshift); \
-+ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
-+ end -= cshift; \
-+ this_unit--; \
-+ } \
-+ for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
-+ __v = _bt_piecewise_lshift(__v, ts); \
-+ __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
-+ end -= ts; \
-+ } \
-+ if (__start % ts) { \
-+ mask = ~((~(type) 0) << (ts - (__start % ts))); \
-+ cmask = __ptr[this_unit]; \
-+ cmask >>= (__start % ts); \
-+ cmask &= mask; \
-+ __v = _bt_piecewise_lshift(__v, ts - (__start % ts)); \
-+ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
-+ } else { \
-+ __v = _bt_piecewise_lshift(__v, ts); \
-+ __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
-+ } \
-+ *__vptr = __v; \
-+} while (0)
-+
-+#define _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
-+do { \
-+ typeof(*(_vptr)) *__vptr = (_vptr); \
-+ typeof(*__vptr) __v; \
-+ type *__ptr = (void *) (_ptr); \
-+ unsigned long __start = (_start), __length = (_length); \
-+ type mask, cmask; \
-+ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
-+ unsigned long start_unit, end_unit, this_unit; \
-+ unsigned long end, cshift; /* cshift is "complement shift" */ \
-+ \
-+ if (!__length) { \
-+ *__vptr = 0; \
-+ break; \
-+ } \
-+ \
-+ end = __start + __length; \
-+ start_unit = __start / ts; \
-+ end_unit = (end + (ts - 1)) / ts; \
-+ \
-+ this_unit = start_unit; \
-+ if (_bt_is_signed_type(typeof(__v)) \
-+ && (__ptr[this_unit] & ((type) 1 << (ts - (__start % ts) - 1)))) \
-+ __v = ~(typeof(__v)) 0; \
-+ else \
-+ __v = 0; \
-+ if (start_unit == end_unit - 1) { \
-+ cmask = __ptr[this_unit]; \
-+ cmask >>= (ts - (end % ts)) % ts; \
-+ if ((end - __start) % ts) { \
-+ mask = ~((~(type) 0) << (end - __start)); \
-+ cmask &= mask; \
-+ } \
-+ __v = _bt_piecewise_lshift(__v, end - __start); \
-+ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
-+ *__vptr = __v; \
-+ break; \
-+ } \
-+ if (__start % ts) { \
-+ cshift = __start % ts; \
-+ mask = ~((~(type) 0) << (ts - cshift)); \
-+ cmask = __ptr[this_unit]; \
-+ cmask &= mask; \
-+ __v = _bt_piecewise_lshift(__v, ts - cshift); \
-+ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
-+ __start += ts - cshift; \
-+ this_unit++; \
-+ } \
-+ for (; this_unit < end_unit - 1; this_unit++) { \
-+ __v = _bt_piecewise_lshift(__v, ts); \
-+ __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
-+ __start += ts; \
-+ } \
-+ if (end % ts) { \
-+ mask = ~((~(type) 0) << (end % ts)); \
-+ cmask = __ptr[this_unit]; \
-+ cmask >>= ts - (end % ts); \
-+ cmask &= mask; \
-+ __v = _bt_piecewise_lshift(__v, end % ts); \
-+ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
-+ } else { \
-+ __v = _bt_piecewise_lshift(__v, ts); \
-+ __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
-+ } \
-+ *__vptr = __v; \
-+} while (0)
-+
-+/*
-+ * bt_bitfield_read - read integer from a bitfield in native endianness
-+ * bt_bitfield_read_le - read integer from a bitfield in little endian
-+ * bt_bitfield_read_be - read integer from a bitfield in big endian
-+ */
-+
-+#if (__BYTE_ORDER == __LITTLE_ENDIAN)
-+
-+#define bt_bitfield_read(_ptr, type, _start, _length, _vptr) \
-+ _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr)
-+
-+#define bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
-+ _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr)
-+
-+#define bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
-+ _bt_bitfield_read_be(_ptr, unsigned char, _start, _length, _vptr)
-+
-+#elif (__BYTE_ORDER == __BIG_ENDIAN)
-+
-+#define bt_bitfield_read(_ptr, type, _start, _length, _vptr) \
-+ _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr)
-+
-+#define bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
-+ _bt_bitfield_read_le(_ptr, unsigned char, _start, _length, _vptr)
-+
-+#define bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
-+ _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr)
-+
-+#else /* (__BYTE_ORDER == __PDP_ENDIAN) */
-+
-+#error "Byte order not supported"
-+
-+#endif
-+
-+#endif /* _BABELTRACE_BITFIELD_H */
---
-1.7.9
-
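Assuming the bitfield.h header above is on the include path, the following is a minimal usage sketch of the native-endianness macros; the buffer and values are made up, the point being that a field written with bt_bitfield_write() at a given (start, length) reads back unchanged through bt_bitfield_read() on both little- and big-endian hosts.

#include <string.h>
#include "bitfield.h"   /* bt_bitfield_write(), bt_bitfield_read() */

static void bitfield_demo(void)
{
        unsigned char buf[1];
        unsigned int a, b;

        memset(buf, 0, sizeof(buf));

        /* Pack a 3-bit field at bit 0 and a 5-bit field at bit 3. */
        bt_bitfield_write(buf, unsigned char, 0, 3, 0x5);
        bt_bitfield_write(buf, unsigned char, 3, 5, 0x1a);

        /* Read them back; the unsigned destinations are zero-extended. */
        bt_bitfield_read(buf, unsigned char, 0, 3, &a); /* a == 0x5  */
        bt_bitfield_read(buf, unsigned char, 3, 5, &b); /* b == 0x1a */
}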
diff --git a/patches.lttng/0003-lttng-BUILD_RUNTIME_BUG_ON.patch b/patches.lttng/0003-lttng-BUILD_RUNTIME_BUG_ON.patch
deleted file mode 100644
index a94b2ea0721..00000000000
--- a/patches.lttng/0003-lttng-BUILD_RUNTIME_BUG_ON.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From a643061cb861ebb18e0292b7510dd9879b598ae0 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:11 -0500
-Subject: lttng: BUILD_RUNTIME_BUG_ON
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/lib/bug.h | 29 +++++++++++++++++++++++++++++
- 1 files changed, 29 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/lib/bug.h
-
-diff --git a/drivers/staging/lttng/lib/bug.h b/drivers/staging/lttng/lib/bug.h
-new file mode 100644
-index 0000000..8243cc9
---- /dev/null
-+++ b/drivers/staging/lttng/lib/bug.h
-@@ -0,0 +1,29 @@
-+#ifndef _LTTNG_BUG_H
-+#define _LTTNG_BUG_H
-+
-+/*
-+ * lib/bug.h
-+ *
-+ * (C) Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+/**
-+ * BUILD_RUNTIME_BUG_ON - check condition at build (if constant) or runtime
-+ * @condition: the condition which should be false.
-+ *
-+ * If the condition is a constant and true, the compiler will generate a build
-+ * error. If the condition is not constant, a BUG will be triggered at runtime
-+ * if the condition is ever true. If the condition is constant and false, no
-+ * code is emitted.
-+ */
-+#define BUILD_RUNTIME_BUG_ON(condition) \
-+ do { \
-+ if (__builtin_constant_p(condition)) \
-+ BUILD_BUG_ON(condition); \
-+ else \
-+ BUG_ON(condition); \
-+ } while (0)
-+
-+#endif
---
-1.7.9
-
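A short, hypothetical illustration of the macro above: the same assertion either fails the build or traps at runtime, depending on whether the compiler can see the condition as a constant. The structure and the size limit are made up; only BUILD_RUNTIME_BUG_ON() comes from the header.

#include <linux/bug.h>
#include <linux/types.h>
#include "lib/bug.h"    /* BUILD_RUNTIME_BUG_ON() (illustrative path) */

struct demo_header {
        u32 magic;
        u32 len;
};

static void demo_validate(size_t payload_len)
{
        /* Constant condition: folded by the compiler into BUILD_BUG_ON(),
         * so growing struct demo_header breaks the build, not the tracer. */
        BUILD_RUNTIME_BUG_ON(sizeof(struct demo_header) != 8);

        /* Non-constant condition: falls back to a runtime BUG_ON(). */
        BUILD_RUNTIME_BUG_ON(payload_len > 4096);
}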
diff --git a/patches.lttng/0004-lttng-offset-alignment-header.patch b/patches.lttng/0004-lttng-offset-alignment-header.patch
deleted file mode 100644
index 4ddd02356b9..00000000000
--- a/patches.lttng/0004-lttng-offset-alignment-header.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From 2d31597d37ec8842b9575005190fde2898764628 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:12 -0500
-Subject: lttng: offset alignment header
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/lib/align.h | 61 +++++++++++++++++++++++++++++++++++++
- 1 files changed, 61 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/lib/align.h
-
-diff --git a/drivers/staging/lttng/lib/align.h b/drivers/staging/lttng/lib/align.h
-new file mode 100644
-index 0000000..0b86100
---- /dev/null
-+++ b/drivers/staging/lttng/lib/align.h
-@@ -0,0 +1,61 @@
-+#ifndef _LTTNG_ALIGN_H
-+#define _LTTNG_ALIGN_H
-+
-+/*
-+ * lib/align.h
-+ *
-+ * (C) Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/types.h>
-+#include "bug.h"
-+
-+#define ALIGN_FLOOR(x, a) __ALIGN_FLOOR_MASK(x, (typeof(x)) (a) - 1)
-+#define __ALIGN_FLOOR_MASK(x, mask) ((x) & ~(mask))
-+#define PTR_ALIGN_FLOOR(p, a) \
-+ ((typeof(p)) ALIGN_FLOOR((unsigned long) (p), a))
-+
-+/*
-+ * Align pointer on natural object alignment.
-+ */
-+#define object_align(obj) PTR_ALIGN(obj, __alignof__(*(obj)))
-+#define object_align_floor(obj) PTR_ALIGN_FLOOR(obj, __alignof__(*(obj)))
-+
-+/**
-+ * offset_align - Calculate the offset needed to align an object on its natural
-+ * alignment towards higher addresses.
-+ * @align_drift: object offset from an "alignment"-aligned address.
-+ * @alignment: natural object alignment. Must be non-zero, power of 2.
-+ *
-+ * Returns the offset that must be added to align towards higher
-+ * addresses.
-+ */
-+#define offset_align(align_drift, alignment) \
-+ ({ \
-+ BUILD_RUNTIME_BUG_ON((alignment) == 0 \
-+ || ((alignment) & ((alignment) - 1))); \
-+ (((alignment) - (align_drift)) & ((alignment) - 1)); \
-+ })
-+
-+/**
-+ * offset_align_floor - Calculate the offset needed to align an object
-+ * on its natural alignment towards lower addresses.
-+ * @align_drift: object offset from an "alignment"-aligned address.
-+ * @alignment: natural object alignment. Must be non-zero, power of 2.
-+ *
-+ * Returns the offset that must be subtracted to align towards lower addresses.
-+ */
-+#define offset_align_floor(align_drift, alignment) \
-+ ({ \
-+ BUILD_RUNTIME_BUG_ON((alignment) == 0 \
-+ || ((alignment) & ((alignment) - 1))); \
-+ (((align_drift) - (alignment)) & ((alignment) - 1)); \
-+ })
-+
-+#endif /* __KERNEL__ */
-+
-+#endif
---
-1.7.9
-
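A worked example of the two macros above, with made-up offsets and illustrative include paths; only offset_align() and offset_align_floor() come from the header.

#include <linux/bug.h>
#include "lib/align.h"  /* offset_align(), offset_align_floor() */

static void align_demo(void)
{
        unsigned long pos = 13;         /* current buffer offset */
        unsigned long drift, up, down;

        drift = pos & 7;                /* 5: offset within an 8-byte unit */
        /* Padding up to the next 8-byte boundary: (8 - 5) & 7 == 3 -> 16. */
        up = pos + offset_align(drift, 8);
        /* Distance down to the previous boundary: (5 - 8) & 7 == 5 -> 8. */
        down = pos - offset_align_floor(drift, 8);

        (void) up;
        (void) down;
}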
diff --git a/patches.lttng/0005-lttng-libs-add-Makefile.patch b/patches.lttng/0005-lttng-libs-add-Makefile.patch
deleted file mode 100644
index 2c2b608ab14..00000000000
--- a/patches.lttng/0005-lttng-libs-add-Makefile.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 848afbd72ed02db1ed20ae7917c241983df7b314 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:13 -0500
-Subject: lttng libs: add Makefile
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/lib/Makefile | 11 +++++++++++
- 1 files changed, 11 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/lib/Makefile
-
-diff --git a/drivers/staging/lttng/lib/Makefile b/drivers/staging/lttng/lib/Makefile
-new file mode 100644
-index 0000000..e5735ec
---- /dev/null
-+++ b/drivers/staging/lttng/lib/Makefile
-@@ -0,0 +1,11 @@
-+obj-m += lib-ring-buffer.o
-+
-+lib-ring-buffer-objs := \
-+ ringbuffer/ring_buffer_backend.o \
-+ ringbuffer/ring_buffer_frontend.o \
-+ ringbuffer/ring_buffer_iterator.o \
-+ ringbuffer/ring_buffer_vfs.o \
-+ ringbuffer/ring_buffer_splice.o \
-+ ringbuffer/ring_buffer_mmap.o \
-+ prio_heap/lttng_prio_heap.o \
-+ ../wrapper/splice.o
---
-1.7.9
-
diff --git a/patches.lttng/0006-lttng-wrappers.patch b/patches.lttng/0006-lttng-wrappers.patch
deleted file mode 100644
index f3185635401..00000000000
--- a/patches.lttng/0006-lttng-wrappers.patch
+++ /dev/null
@@ -1,625 +0,0 @@
-From 69e1242eaab021eb6a4110a671af1e443fbf704d Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:14 -0500
-Subject: lttng wrappers
-
-Implement wrappers for compatibility with older kernel versions and
-kernels which had the libringbuffer (old) patchset applied.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/wrapper/ftrace.h | 70 ++++++++++++++++++
- drivers/staging/lttng/wrapper/inline_memcpy.h | 11 +++
- drivers/staging/lttng/wrapper/kallsyms.h | 28 +++++++
- drivers/staging/lttng/wrapper/perf.h | 32 ++++++++
- drivers/staging/lttng/wrapper/poll.h | 14 ++++
- drivers/staging/lttng/wrapper/ringbuffer/api.h | 1 +
- drivers/staging/lttng/wrapper/ringbuffer/backend.h | 1 +
- .../lttng/wrapper/ringbuffer/backend_internal.h | 2 +
- .../lttng/wrapper/ringbuffer/backend_types.h | 1 +
- drivers/staging/lttng/wrapper/ringbuffer/config.h | 1 +
- .../staging/lttng/wrapper/ringbuffer/frontend.h | 1 +
- .../lttng/wrapper/ringbuffer/frontend_api.h | 1 +
- .../lttng/wrapper/ringbuffer/frontend_internal.h | 1 +
- .../lttng/wrapper/ringbuffer/frontend_types.h | 1 +
- .../staging/lttng/wrapper/ringbuffer/iterator.h | 1 +
- drivers/staging/lttng/wrapper/ringbuffer/nohz.h | 1 +
- drivers/staging/lttng/wrapper/ringbuffer/vatomic.h | 1 +
- drivers/staging/lttng/wrapper/ringbuffer/vfs.h | 1 +
- drivers/staging/lttng/wrapper/spinlock.h | 26 +++++++
- drivers/staging/lttng/wrapper/splice.c | 46 ++++++++++++
- drivers/staging/lttng/wrapper/splice.h | 23 ++++++
- drivers/staging/lttng/wrapper/trace-clock.h | 75 ++++++++++++++++++++
- drivers/staging/lttng/wrapper/uuid.h | 29 ++++++++
- drivers/staging/lttng/wrapper/vmalloc.h | 49 +++++++++++++
- 24 files changed, 417 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/wrapper/ftrace.h
- create mode 100644 drivers/staging/lttng/wrapper/inline_memcpy.h
- create mode 100644 drivers/staging/lttng/wrapper/kallsyms.h
- create mode 100644 drivers/staging/lttng/wrapper/perf.h
- create mode 100644 drivers/staging/lttng/wrapper/poll.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/api.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/backend.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/backend_internal.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/backend_types.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/config.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/frontend.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/frontend_api.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/frontend_internal.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/frontend_types.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/iterator.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/nohz.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/vatomic.h
- create mode 100644 drivers/staging/lttng/wrapper/ringbuffer/vfs.h
- create mode 100644 drivers/staging/lttng/wrapper/spinlock.h
- create mode 100644 drivers/staging/lttng/wrapper/splice.c
- create mode 100644 drivers/staging/lttng/wrapper/splice.h
- create mode 100644 drivers/staging/lttng/wrapper/trace-clock.h
- create mode 100644 drivers/staging/lttng/wrapper/uuid.h
- create mode 100644 drivers/staging/lttng/wrapper/vmalloc.h
-
-diff --git a/drivers/staging/lttng/wrapper/ftrace.h b/drivers/staging/lttng/wrapper/ftrace.h
-new file mode 100644
-index 0000000..ace33c5
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ftrace.h
-@@ -0,0 +1,70 @@
-+#ifndef _LTT_WRAPPER_FTRACE_H
-+#define _LTT_WRAPPER_FTRACE_H
-+
-+/*
-+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ *
-+ * wrapper around register/unregister_ftrace_function_probe. Using KALLSYMS to
-+ * get their addresses when available, else we need to have a kernel that
-+ * exports these functions to GPL modules.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/ftrace.h>
-+
-+#ifdef CONFIG_KALLSYMS
-+
-+#include <linux/kallsyms.h>
-+#include "kallsyms.h"
-+
-+static inline
-+int wrapper_register_ftrace_function_probe(char *glob,
-+ struct ftrace_probe_ops *ops, void *data)
-+{
-+ int (*register_ftrace_function_probe_sym)(char *glob,
-+ struct ftrace_probe_ops *ops, void *data);
-+
-+ register_ftrace_function_probe_sym = (void *) kallsyms_lookup_funcptr("register_ftrace_function_probe");
-+ if (register_ftrace_function_probe_sym) {
-+ return register_ftrace_function_probe_sym(glob, ops, data);
-+ } else {
-+ printk(KERN_WARNING "LTTng: register_ftrace_function_probe symbol lookup failed.\n");
-+ return -EINVAL;
-+ }
-+}
-+
-+static inline
-+void wrapper_unregister_ftrace_function_probe(char *glob,
-+ struct ftrace_probe_ops *ops, void *data)
-+{
-+ void (*unregister_ftrace_function_probe_sym)(char *glob,
-+ struct ftrace_probe_ops *ops, void *data);
-+
-+ unregister_ftrace_function_probe_sym = (void *) kallsyms_lookup_funcptr("unregister_ftrace_function_probe");
-+ if (unregister_ftrace_function_probe_sym) {
-+ unregister_ftrace_function_probe_sym(glob, ops, data);
-+ } else {
-+ printk(KERN_WARNING "LTTng: unregister_ftrace_function_probe symbol lookup failed.\n");
-+ WARN_ON(1);
-+ }
-+}
-+
-+#else
-+
-+static inline
-+int wrapper_register_ftrace_function_probe(char *glob,
-+ struct ftrace_probe_ops *ops, void *data)
-+{
-+ return register_ftrace_function_probe(glob, ops, data);
-+}
-+
-+static inline
-+void wrapper_unregister_ftrace_function_probe(char *glob,
-+ struct ftrace_probe_ops *ops, void *data)
-+{
-+ return unregister_ftrace_function_probe(glob, ops, data);
-+}
-+#endif
-+
-+#endif /* _LTT_WRAPPER_FTRACE_H */
-diff --git a/drivers/staging/lttng/wrapper/inline_memcpy.h b/drivers/staging/lttng/wrapper/inline_memcpy.h
-new file mode 100644
-index 0000000..33150cd
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/inline_memcpy.h
-@@ -0,0 +1,11 @@
-+/*
-+ * wrapper/inline_memcpy.h
-+ *
-+ * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#if !defined(__HAVE_ARCH_INLINE_MEMCPY) && !defined(inline_memcpy)
-+#define inline_memcpy memcpy
-+#endif
-diff --git a/drivers/staging/lttng/wrapper/kallsyms.h b/drivers/staging/lttng/wrapper/kallsyms.h
-new file mode 100644
-index 0000000..bb45f38
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/kallsyms.h
-@@ -0,0 +1,28 @@
-+#ifndef _LTT_WRAPPER_KALLSYMS_H
-+#define _LTT_WRAPPER_KALLSYMS_H
-+
-+/*
-+ * Copyright (C) 2011 Avik Sil (avik.sil@linaro.org)
-+ *
-+ * wrapper around kallsyms_lookup_name. Implements arch-dependent code for
-+ * arches where the address of the start of the function body is different
-+ * from the pointer which can be used to call the function, e.g. ARM THUMB2.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+static inline
-+unsigned long kallsyms_lookup_funcptr(const char *name)
-+{
-+ unsigned long addr;
-+
-+ addr = kallsyms_lookup_name(name);
-+#ifdef CONFIG_ARM
-+#ifdef CONFIG_THUMB2_KERNEL
-+ if (addr)
-+ addr |= 1; /* set bit 0 in address for thumb mode */
-+#endif
-+#endif
-+ return addr;
-+}
-+#endif /* _LTT_WRAPPER_KALLSYMS_H */
-diff --git a/drivers/staging/lttng/wrapper/perf.h b/drivers/staging/lttng/wrapper/perf.h
-new file mode 100644
-index 0000000..9a6dbfc
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/perf.h
-@@ -0,0 +1,32 @@
-+#ifndef _LTT_WRAPPER_PERF_H
-+#define _LTT_WRAPPER_PERF_H
-+
-+/*
-+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/perf_event.h>
-+
-+#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
-+static inline struct perf_event *
-+wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
-+ int cpu,
-+ struct task_struct *task,
-+ perf_overflow_handler_t callback)
-+{
-+ return perf_event_create_kernel_counter(attr, cpu, task, callback, NULL);
-+}
-+#else
-+static inline struct perf_event *
-+wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
-+ int cpu,
-+ struct task_struct *task,
-+ perf_overflow_handler_t callback)
-+{
-+ return perf_event_create_kernel_counter(attr, cpu, task, callback);
-+}
-+#endif
-+
-+#endif /* _LTT_WRAPPER_PERF_H */
-diff --git a/drivers/staging/lttng/wrapper/poll.h b/drivers/staging/lttng/wrapper/poll.h
-new file mode 100644
-index 0000000..9c2d18f
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/poll.h
-@@ -0,0 +1,14 @@
-+#ifndef _LTTNG_WRAPPER_POLL_H
-+#define _LTTNG_WRAPPER_POLL_H
-+
-+/*
-+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/poll.h>
-+
-+#define poll_wait_set_exclusive(poll_table)
-+
-+#endif /* _LTTNG_WRAPPER_POLL_H */
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/api.h b/drivers/staging/lttng/wrapper/ringbuffer/api.h
-new file mode 100644
-index 0000000..182bee2
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/api.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/api.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/backend.h b/drivers/staging/lttng/wrapper/ringbuffer/backend.h
-new file mode 100644
-index 0000000..bfdd39d
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/backend.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/backend.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/backend_internal.h b/drivers/staging/lttng/wrapper/ringbuffer/backend_internal.h
-new file mode 100644
-index 0000000..00d45e4
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/backend_internal.h
-@@ -0,0 +1,2 @@
-+#include "../../wrapper/inline_memcpy.h"
-+#include "../../lib/ringbuffer/backend_internal.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/backend_types.h b/drivers/staging/lttng/wrapper/ringbuffer/backend_types.h
-new file mode 100644
-index 0000000..c59effd
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/backend_types.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/backend_types.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/config.h b/drivers/staging/lttng/wrapper/ringbuffer/config.h
-new file mode 100644
-index 0000000..0ce7a9d
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/config.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/config.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/frontend.h b/drivers/staging/lttng/wrapper/ringbuffer/frontend.h
-new file mode 100644
-index 0000000..7c6c070
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/frontend.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/frontend.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/frontend_api.h b/drivers/staging/lttng/wrapper/ringbuffer/frontend_api.h
-new file mode 100644
-index 0000000..b03c501
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/frontend_api.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/frontend_api.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/frontend_internal.h b/drivers/staging/lttng/wrapper/ringbuffer/frontend_internal.h
-new file mode 100644
-index 0000000..1899101
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/frontend_internal.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/frontend_internal.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/frontend_types.h b/drivers/staging/lttng/wrapper/ringbuffer/frontend_types.h
-new file mode 100644
-index 0000000..0c23244
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/frontend_types.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/frontend_types.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/iterator.h b/drivers/staging/lttng/wrapper/ringbuffer/iterator.h
-new file mode 100644
-index 0000000..76e9edb
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/iterator.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/iterator.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/nohz.h b/drivers/staging/lttng/wrapper/ringbuffer/nohz.h
-new file mode 100644
-index 0000000..9fbb84d
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/nohz.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/nohz.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/vatomic.h b/drivers/staging/lttng/wrapper/ringbuffer/vatomic.h
-new file mode 100644
-index 0000000..d578445
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/vatomic.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/vatomic.h"
-diff --git a/drivers/staging/lttng/wrapper/ringbuffer/vfs.h b/drivers/staging/lttng/wrapper/ringbuffer/vfs.h
-new file mode 100644
-index 0000000..f8e9ed9
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/ringbuffer/vfs.h
-@@ -0,0 +1 @@
-+#include "../../lib/ringbuffer/vfs.h"
-diff --git a/drivers/staging/lttng/wrapper/spinlock.h b/drivers/staging/lttng/wrapper/spinlock.h
-new file mode 100644
-index 0000000..8b1ad99
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/spinlock.h
-@@ -0,0 +1,26 @@
-+#ifndef _LTT_WRAPPER_SPINLOCK_H
-+#define _LTT_WRAPPER_SPINLOCK_H
-+
-+/*
-+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/version.h>
-+
-+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
-+
-+#include <linux/string.h>
-+
-+#define raw_spin_lock_init(lock) \
-+ do { \
-+ raw_spinlock_t __lock = __RAW_SPIN_LOCK_UNLOCKED; \
-+ memcpy(lock, &__lock, sizeof(__lock)); \
-+ } while (0)
-+
-+#define raw_spin_is_locked(lock) __raw_spin_is_locked(lock)
-+
-+
-+#endif
-+#endif /* _LTT_WRAPPER_SPINLOCK_H */
-diff --git a/drivers/staging/lttng/wrapper/splice.c b/drivers/staging/lttng/wrapper/splice.c
-new file mode 100644
-index 0000000..ba224ee
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/splice.c
-@@ -0,0 +1,46 @@
-+/*
-+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ *
-+ * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
-+ * available, else we need to have a kernel that exports this function to GPL
-+ * modules.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#ifdef CONFIG_KALLSYMS
-+
-+#include <linux/kallsyms.h>
-+#include <linux/fs.h>
-+#include <linux/splice.h>
-+#include "kallsyms.h"
-+
-+static
-+ssize_t (*splice_to_pipe_sym)(struct pipe_inode_info *pipe,
-+ struct splice_pipe_desc *spd);
-+
-+ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
-+ struct splice_pipe_desc *spd)
-+{
-+ if (!splice_to_pipe_sym)
-+ splice_to_pipe_sym = (void *) kallsyms_lookup_funcptr("splice_to_pipe");
-+ if (splice_to_pipe_sym) {
-+ return splice_to_pipe_sym(pipe, spd);
-+ } else {
-+ printk(KERN_WARNING "LTTng: splice_to_pipe symbol lookup failed.\n");
-+ return -ENOSYS;
-+ }
-+}
-+
-+#else
-+
-+#include <linux/fs.h>
-+#include <linux/splice.h>
-+
-+ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
-+ struct splice_pipe_desc *spd)
-+{
-+ return splice_to_pipe(pipe, spd);
-+}
-+
-+#endif
-diff --git a/drivers/staging/lttng/wrapper/splice.h b/drivers/staging/lttng/wrapper/splice.h
-new file mode 100644
-index 0000000..f75309a
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/splice.h
-@@ -0,0 +1,23 @@
-+#ifndef _LTT_WRAPPER_SPLICE_H
-+#define _LTT_WRAPPER_SPLICE_H
-+
-+/*
-+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ *
-+ * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
-+ * available, else we need to have a kernel that exports this function to GPL
-+ * modules.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/splice.h>
-+
-+ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
-+ struct splice_pipe_desc *spd);
-+
-+#ifndef PIPE_DEF_BUFFERS
-+#define PIPE_DEF_BUFFERS 16
-+#endif
-+
-+#endif /* _LTT_WRAPPER_SPLICE_H */
-diff --git a/drivers/staging/lttng/wrapper/trace-clock.h b/drivers/staging/lttng/wrapper/trace-clock.h
-new file mode 100644
-index 0000000..8b77428
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/trace-clock.h
-@@ -0,0 +1,75 @@
-+/*
-+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ *
-+ * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
-+ * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#ifndef _LTT_TRACE_CLOCK_H
-+#define _LTT_TRACE_CLOCK_H
-+
-+#ifdef CONFIG_HAVE_TRACE_CLOCK
-+#include <linux/trace-clock.h>
-+#else /* CONFIG_HAVE_TRACE_CLOCK */
-+
-+#include <linux/hardirq.h>
-+#include <linux/ktime.h>
-+#include <linux/time.h>
-+#include <linux/hrtimer.h>
-+
-+static inline u64 trace_clock_monotonic_wrapper(void)
-+{
-+ ktime_t ktime;
-+
-+ /*
-+ * Refuse to trace from NMIs with this wrapper, because an NMI could
-+ * nest over the xtime write seqlock and deadlock.
-+ */
-+ if (in_nmi())
-+ return (u64) -EIO;
-+
-+ ktime = ktime_get();
-+ return (u64) ktime.tv64;
-+}
-+
-+static inline u32 trace_clock_read32(void)
-+{
-+ return (u32) trace_clock_monotonic_wrapper();
-+}
-+
-+static inline u64 trace_clock_read64(void)
-+{
-+ return (u64) trace_clock_monotonic_wrapper();
-+}
-+
-+static inline u64 trace_clock_frequency(void)
-+{
-+ return (u64)NSEC_PER_SEC;
-+}
-+
-+static inline u32 trace_clock_freq_scale(void)
-+{
-+ return 1;
-+}
-+
-+static inline int get_trace_clock(void)
-+{
-+ printk(KERN_WARNING "LTTng: Using mainline kernel monotonic clock.\n");
-+ printk(KERN_WARNING " * NMIs will not be traced,\n");
-+ printk(KERN_WARNING " * expect significant performance degradation compared to the\n");
-+ printk(KERN_WARNING " LTTng trace clocks.\n");
-+ printk(KERN_WARNING "Integration of the LTTng 0.x trace clocks into LTTng 2.0 is planned\n");
-+ printk(KERN_WARNING "in a near future.\n");
-+
-+ return 0;
-+}
-+
-+static inline void put_trace_clock(void)
-+{
-+}
-+
-+#endif /* CONFIG_HAVE_TRACE_CLOCK */
-+
-+#endif /* _LTT_TRACE_CLOCK_H */
-diff --git a/drivers/staging/lttng/wrapper/uuid.h b/drivers/staging/lttng/wrapper/uuid.h
-new file mode 100644
-index 0000000..bfa67ff
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/uuid.h
-@@ -0,0 +1,29 @@
-+#ifndef _LTT_WRAPPER_UUID_H
-+#define _LTT_WRAPPER_UUID_H
-+
-+/*
-+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/version.h>
-+
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
-+#include <linux/uuid.h>
-+#else
-+
-+#include <linux/random.h>
-+
-+typedef struct {
-+ __u8 b[16];
-+} uuid_le;
-+
-+static inline
-+void uuid_le_gen(uuid_le *u)
-+{
-+ generate_random_uuid(u->b);
-+}
-+
-+#endif
-+#endif /* _LTT_WRAPPER_UUID_H */
-diff --git a/drivers/staging/lttng/wrapper/vmalloc.h b/drivers/staging/lttng/wrapper/vmalloc.h
-new file mode 100644
-index 0000000..765f2ad
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/vmalloc.h
-@@ -0,0 +1,49 @@
-+#ifndef _LTT_WRAPPER_VMALLOC_H
-+#define _LTT_WRAPPER_VMALLOC_H
-+
-+/*
-+ * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ *
-+ * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
-+ * available, else we need to have a kernel that exports this function to GPL
-+ * modules.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#ifdef CONFIG_KALLSYMS
-+
-+#include <linux/kallsyms.h>
-+#include "kallsyms.h"
-+
-+static inline
-+void wrapper_vmalloc_sync_all(void)
-+{
-+ void (*vmalloc_sync_all_sym)(void);
-+
-+ vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
-+ if (vmalloc_sync_all_sym) {
-+ vmalloc_sync_all_sym();
-+ } else {
-+#ifdef CONFIG_X86
-+ /*
-+ * Only x86 needs vmalloc_sync_all to make sure LTTng does not
-+ * trigger recursive page faults.
-+ */
-+ printk(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
-+ printk(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
-+#endif
-+ }
-+}
-+#else
-+
-+#include <linux/vmalloc.h>
-+
-+static inline
-+void wrapper_vmalloc_sync_all(void)
-+{
-+ return vmalloc_sync_all();
-+}
-+#endif
-+
-+#endif /* _LTT_WRAPPER_VMALLOC_H */
---
-1.7.9
-
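The wrappers above mostly share one pattern: resolve a symbol that is not exported to GPL modules through kallsyms at runtime, call it if found, and warn or fall back to the plain call on kernels that do export it. A minimal sketch of that pattern for a hypothetical symbol foo_sync() follows; the symbol name and prototype are invented, kallsyms_lookup_funcptr() is the wrapper added above.

#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include "wrapper/kallsyms.h"   /* kallsyms_lookup_funcptr() */

#ifdef CONFIG_KALLSYMS

static int (*foo_sync_sym)(void);

static inline int wrapper_foo_sync(void)
{
        /* Resolve the address once, lazily, the first time it is needed. */
        if (!foo_sync_sym)
                foo_sync_sym = (void *) kallsyms_lookup_funcptr("foo_sync");
        if (foo_sync_sym)
                return foo_sync_sym();
        printk(KERN_WARNING "LTTng: foo_sync symbol lookup failed.\n");
        return -ENOSYS;
}

#else /* !CONFIG_KALLSYMS */

static inline int wrapper_foo_sync(void)
{
        return foo_sync();      /* requires a kernel exporting the symbol */
}

#endif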
diff --git a/patches.lttng/0007-lttng-instrumentation-tracepoint-events.patch b/patches.lttng/0007-lttng-instrumentation-tracepoint-events.patch
deleted file mode 100644
index 2aa7da85ea9..00000000000
--- a/patches.lttng/0007-lttng-instrumentation-tracepoint-events.patch
+++ /dev/null
@@ -1,3227 +0,0 @@
-From 763be8c0a919015a3c1e205005176d4aacec22e3 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:15 -0500
-Subject: lttng instrumentation: tracepoint events
-
-Modifications to the in-kernel TRACE_EVENT are needed to generate the
-compact event descriptions and probe code that LTTng generates. These
-changes could apply to upstream TRACE_EVENT, but doing so would require
-changing the in-kernel API.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- .../staging/lttng/instrumentation/events/README | 7 +
- .../instrumentation/events/lttng-module/block.h | 626 ++++++++++++++++++++
- .../instrumentation/events/lttng-module/irq.h | 155 +++++
- .../instrumentation/events/lttng-module/kvm.h | 312 ++++++++++
- .../instrumentation/events/lttng-module/lttng.h | 34 ++
- .../instrumentation/events/lttng-module/sched.h | 400 +++++++++++++
- .../instrumentation/events/lttng-module/syscalls.h | 76 +++
- .../lttng/instrumentation/events/mainline/block.h | 569 ++++++++++++++++++
- .../lttng/instrumentation/events/mainline/irq.h | 150 +++++
- .../lttng/instrumentation/events/mainline/kvm.h | 312 ++++++++++
- .../lttng/instrumentation/events/mainline/sched.h | 397 +++++++++++++
- .../instrumentation/events/mainline/syscalls.h | 75 +++
- 12 files changed, 3113 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/instrumentation/events/README
- create mode 100644 drivers/staging/lttng/instrumentation/events/lttng-module/block.h
- create mode 100644 drivers/staging/lttng/instrumentation/events/lttng-module/irq.h
- create mode 100644 drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h
- create mode 100644 drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h
- create mode 100644 drivers/staging/lttng/instrumentation/events/lttng-module/sched.h
- create mode 100644 drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h
- create mode 100644 drivers/staging/lttng/instrumentation/events/mainline/block.h
- create mode 100644 drivers/staging/lttng/instrumentation/events/mainline/irq.h
- create mode 100644 drivers/staging/lttng/instrumentation/events/mainline/kvm.h
- create mode 100644 drivers/staging/lttng/instrumentation/events/mainline/sched.h
- create mode 100644 drivers/staging/lttng/instrumentation/events/mainline/syscalls.h
-
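For reference, below is a minimal hypothetical event written against the modified macros this patch introduces; the event name and fields are invented, but the TP_STRUCT__entry()/TP_fast_assign() idioms (__field(), __array_text(), tp_assign(), tp_memcpy()) mirror the block.h instrumentation further below.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/sched.h>
#include <linux/tracepoint.h>

TRACE_EVENT(demo_submit,

        TP_PROTO(unsigned int id),

        TP_ARGS(id),

        TP_STRUCT__entry(
                __field(        unsigned int,   id      )
                __array_text(   char,   comm,   TASK_COMM_LEN   )
        ),

        /* tp_assign()/tp_memcpy() replace direct __entry assignments. */
        TP_fast_assign(
                tp_assign(id, id)
                tp_memcpy(comm, current->comm, TASK_COMM_LEN)
        ),

        TP_printk("id=%u comm=%s", __entry->id, __entry->comm)
)

#endif /* _TRACE_DEMO_H */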
-diff --git a/drivers/staging/lttng/instrumentation/events/README b/drivers/staging/lttng/instrumentation/events/README
-new file mode 100644
-index 0000000..dad2cbb
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/README
-@@ -0,0 +1,7 @@
-+The workflow for updating these headers from a newer kernel:
-+
-+Diff the mainline/ and lttng-module/ directories.
-+
-+Pull the new headers from the mainline kernel into mainline/.
-+Copy them into lttng-module/.
-+Apply the diff. Fix conflicts.
-diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/block.h b/drivers/staging/lttng/instrumentation/events/lttng-module/block.h
-new file mode 100644
-index 0000000..42184f3
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/block.h
-@@ -0,0 +1,626 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM block
-+
-+#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_BLOCK_H
-+
-+#include <linux/blktrace_api.h>
-+#include <linux/blkdev.h>
-+#include <linux/tracepoint.h>
-+#include <linux/trace_seq.h>
-+
-+#ifndef _TRACE_BLOCK_DEF_
-+#define _TRACE_BLOCK_DEF_
-+
-+#define __blk_dump_cmd(cmd, len) "<unknown>"
-+
-+enum {
-+ RWBS_FLAG_WRITE = (1 << 0),
-+ RWBS_FLAG_DISCARD = (1 << 1),
-+ RWBS_FLAG_READ = (1 << 2),
-+ RWBS_FLAG_RAHEAD = (1 << 3),
-+ RWBS_FLAG_SYNC = (1 << 4),
-+ RWBS_FLAG_META = (1 << 5),
-+ RWBS_FLAG_SECURE = (1 << 6),
-+};
-+
-+#endif /* _TRACE_BLOCK_DEF_ */
-+
-+#define __print_rwbs_flags(rwbs) \
-+ __print_flags(rwbs, "", \
-+ { RWBS_FLAG_WRITE, "W" }, \
-+ { RWBS_FLAG_DISCARD, "D" }, \
-+ { RWBS_FLAG_READ, "R" }, \
-+ { RWBS_FLAG_RAHEAD, "A" }, \
-+ { RWBS_FLAG_SYNC, "S" }, \
-+ { RWBS_FLAG_META, "M" }, \
-+ { RWBS_FLAG_SECURE, "E" })
-+
-+#define blk_fill_rwbs(rwbs, rw, bytes) \
-+ tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
-+ ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
-+ ( (bytes) ? RWBS_FLAG_READ : \
-+ ( 0 )))) \
-+ | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
-+ | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
-+ | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
-+ | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))
-+
-+DECLARE_EVENT_CLASS(block_rq_with_error,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( int, errors )
-+ __field( unsigned int, rwbs )
-+ __dynamic_array_hex( unsigned char, cmd,
-+ (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ rq->cmd_len : 0)
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
-+ tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ 0 : blk_rq_pos(rq))
-+ tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ 0 : blk_rq_sectors(rq))
-+ tp_assign(errors, rq->errors)
-+ blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
-+ tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ rq->cmd : NULL);
-+ ),
-+
-+ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __print_rwbs_flags(__entry->rwbs),
-+ __blk_dump_cmd(__get_dynamic_array(cmd),
-+ __get_dynamic_array_len(cmd)),
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->errors)
-+)
-+
-+/**
-+ * block_rq_abort - abort block operation request
-+ * @q: queue containing the block operation request
-+ * @rq: block IO operation request
-+ *
-+ * Called immediately after pending block IO operation request @rq in
-+ * queue @q is aborted. The fields in the operation request @rq
-+ * can be examined to determine which device and sectors the pending
-+ * operation would access.
-+ */
-+DEFINE_EVENT(block_rq_with_error, block_rq_abort,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq)
-+)
-+
-+/**
-+ * block_rq_requeue - place block IO request back on a queue
-+ * @q: queue holding operation
-+ * @rq: block IO operation request
-+ *
-+ * The block operation request @rq is being placed back into queue
-+ * @q. For some reason the request was not completed and needs to be
-+ * put back in the queue.
-+ */
-+DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq)
-+)
-+
-+/**
-+ * block_rq_complete - block IO operation completed by device driver
-+ * @q: queue containing the block operation request
-+ * @rq: block operations request
-+ *
-+ * The block_rq_complete tracepoint event indicates that some portion
-+ * of operation request has been completed by the device driver. If
-+ * the @rq->bio is %NULL, then there is absolutely no additional work to
-+ * do for the request. If @rq->bio is non-NULL then there is
-+ * additional work required to complete the request.
-+ */
-+DEFINE_EVENT(block_rq_with_error, block_rq_complete,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq)
-+)
-+
-+DECLARE_EVENT_CLASS(block_rq,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( unsigned int, bytes )
-+ __field( unsigned int, rwbs )
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ __dynamic_array_hex( unsigned char, cmd,
-+ (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ rq->cmd_len : 0)
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
-+ tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ 0 : blk_rq_pos(rq))
-+ tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ 0 : blk_rq_sectors(rq))
-+ tp_assign(bytes, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ blk_rq_bytes(rq) : 0)
-+ blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
-+ tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ rq->cmd : NULL);
-+ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
-+ ),
-+
-+ TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __print_rwbs_flags(__entry->rwbs),
-+ __entry->bytes,
-+ __blk_dump_cmd(__get_dynamic_array(cmd),
-+ __get_dynamic_array_len(cmd)),
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->comm)
-+)
-+
-+/**
-+ * block_rq_insert - insert block operation request into queue
-+ * @q: target queue
-+ * @rq: block IO operation request
-+ *
-+ * Called immediately before block operation request @rq is inserted
-+ * into queue @q. The fields in the operation request @rq struct can
-+ * be examined to determine which device and sectors the pending
-+ * operation would access.
-+ */
-+DEFINE_EVENT(block_rq, block_rq_insert,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq)
-+)
-+
-+/**
-+ * block_rq_issue - issue pending block IO request operation to device driver
-+ * @q: queue holding operation
-+ * @rq: block IO operation request
-+ *
-+ * Called when block operation request @rq from queue @q is sent to a
-+ * device driver for processing.
-+ */
-+DEFINE_EVENT(block_rq, block_rq_issue,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq)
-+)
-+
-+/**
-+ * block_bio_bounce - used bounce buffer when processing block operation
-+ * @q: queue holding the block operation
-+ * @bio: block operation
-+ *
-+ * A bounce buffer was used to handle the block operation @bio in @q.
-+ * This occurs when hardware limitations prevent a direct transfer of
-+ * data between the @bio data memory area and the IO device. Use of a
-+ * bounce buffer requires extra copying of data and decreases
-+ * performance.
-+ */
-+TRACE_EVENT(block_bio_bounce,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio),
-+
-+ TP_ARGS(q, bio),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( unsigned int, rwbs )
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(dev, bio->bi_bdev ?
-+ bio->bi_bdev->bd_dev : 0)
-+ tp_assign(sector, bio->bi_sector)
-+ tp_assign(nr_sector, bio->bi_size >> 9)
-+ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
-+ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u [%s]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __print_rwbs_flags(__entry->rwbs),
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->comm)
-+)
-+
-+/**
-+ * block_bio_complete - completed all work on the block operation
-+ * @q: queue holding the block operation
-+ * @bio: block operation completed
-+ * @error: io error value
-+ *
-+ * This tracepoint indicates there is no further work to do on this
-+ * block IO operation @bio.
-+ */
-+TRACE_EVENT(block_bio_complete,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio, int error),
-+
-+ TP_ARGS(q, bio, error),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned, nr_sector )
-+ __field( int, error )
-+ __field( unsigned int, rwbs )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(dev, bio->bi_bdev->bd_dev)
-+ tp_assign(sector, bio->bi_sector)
-+ tp_assign(nr_sector, bio->bi_size >> 9)
-+ tp_assign(error, error)
-+ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u [%d]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __print_rwbs_flags(__entry->rwbs),
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->error)
-+)
-+
-+DECLARE_EVENT_CLASS(block_bio,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio),
-+
-+ TP_ARGS(q, bio),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( unsigned int, rwbs )
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(dev, bio->bi_bdev->bd_dev)
-+ tp_assign(sector, bio->bi_sector)
-+ tp_assign(nr_sector, bio->bi_size >> 9)
-+ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
-+ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u [%s]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __print_rwbs_flags(__entry->rwbs),
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->comm)
-+)
-+
-+/**
-+ * block_bio_backmerge - merging block operation to the end of an existing operation
-+ * @q: queue holding operation
-+ * @bio: new block operation to merge
-+ *
-+ * Merging block request @bio to the end of an existing block request
-+ * in queue @q.
-+ */
-+DEFINE_EVENT(block_bio, block_bio_backmerge,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio),
-+
-+ TP_ARGS(q, bio)
-+)
-+
-+/**
-+ * block_bio_frontmerge - merging block operation to the beginning of an existing operation
-+ * @q: queue holding operation
-+ * @bio: new block operation to merge
-+ *
-+ * Merging block IO operation @bio to the beginning of an existing block
-+ * operation in queue @q.
-+ */
-+DEFINE_EVENT(block_bio, block_bio_frontmerge,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio),
-+
-+ TP_ARGS(q, bio)
-+)
-+
-+/**
-+ * block_bio_queue - putting new block IO operation in queue
-+ * @q: queue holding operation
-+ * @bio: new block operation
-+ *
-+ * About to place the block IO operation @bio into queue @q.
-+ */
-+DEFINE_EVENT(block_bio, block_bio_queue,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio),
-+
-+ TP_ARGS(q, bio)
-+)
-+
-+DECLARE_EVENT_CLASS(block_get_rq,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-+
-+ TP_ARGS(q, bio, rw),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( unsigned int, rwbs )
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(dev, bio ? bio->bi_bdev->bd_dev : 0)
-+ tp_assign(sector, bio ? bio->bi_sector : 0)
-+ tp_assign(nr_sector, bio ? bio->bi_size >> 9 : 0)
-+ blk_fill_rwbs(rwbs, bio ? bio->bi_rw : 0,
-+ bio ? bio->bi_size >> 9 : 0)
-+ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u [%s]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __print_rwbs_flags(__entry->rwbs),
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->comm)
-+)
-+
-+/**
-+ * block_getrq - get a free request entry in queue for block IO operations
-+ * @q: queue for operations
-+ * @bio: pending block IO operation
-+ * @rw: low bit indicates a read (%0) or a write (%1)
-+ *
-+ * A request struct for queue @q has been allocated to handle the
-+ * block IO operation @bio.
-+ */
-+DEFINE_EVENT(block_get_rq, block_getrq,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-+
-+ TP_ARGS(q, bio, rw)
-+)
-+
-+/**
-+ * block_sleeprq - waiting to get a free request entry in queue for block IO operation
-+ * @q: queue for operation
-+ * @bio: pending block IO operation
-+ * @rw: low bit indicates a read (%0) or a write (%1)
-+ *
-+ * In the case where a request struct cannot be provided for queue @q
-+ * the process needs to wait for a request struct to become
-+ * available. This tracepoint event is generated each time the
-+ * process goes to sleep waiting for a request struct to become available.
-+ */
-+DEFINE_EVENT(block_get_rq, block_sleeprq,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-+
-+ TP_ARGS(q, bio, rw)
-+)
-+
-+/**
-+ * block_plug - keep operation requests in request queue
-+ * @q: request queue to plug
-+ *
-+ * Plug the request queue @q. Do not allow block operation requests
-+ * to be sent to the device driver. Instead, accumulate requests in
-+ * the queue to improve throughput performance of the block device.
-+ */
-+TRACE_EVENT(block_plug,
-+
-+ TP_PROTO(struct request_queue *q),
-+
-+ TP_ARGS(q),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
-+ ),
-+
-+ TP_printk("[%s]", __entry->comm)
-+)
-+
-+DECLARE_EVENT_CLASS(block_unplug,
-+
-+ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
-+
-+ TP_ARGS(q, depth, explicit),
-+
-+ TP_STRUCT__entry(
-+ __field( int, nr_rq )
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(nr_rq, depth)
-+ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
-+ ),
-+
-+ TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
-+)
-+
-+/**
-+ * block_unplug - release of operation requests in request queue
-+ * @q: request queue to unplug
-+ * @depth: number of requests just added to the queue
-+ * @explicit: whether this was an explicit unplug, or one from schedule()
-+ *
-+ * Unplug request queue @q because device driver is scheduled to work
-+ * on elements in the request queue.
-+ */
-+DEFINE_EVENT(block_unplug, block_unplug,
-+
-+ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
-+
-+ TP_ARGS(q, depth, explicit)
-+)
-+
-+/**
-+ * block_split - split a single bio struct into two bio structs
-+ * @q: queue containing the bio
-+ * @bio: block operation being split
-+ * @new_sector: The starting sector for the new bio
-+ *
-+ * The bio request @bio in request queue @q needs to be split into two
-+ * bio requests. The newly created @bio request starts at
-+ * @new_sector. This split may be required due to hardware limitations
-+ * such as operations crossing device boundaries in a RAID system.
-+ */
-+TRACE_EVENT(block_split,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio,
-+ unsigned int new_sector),
-+
-+ TP_ARGS(q, bio, new_sector),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( sector_t, new_sector )
-+ __field( unsigned int, rwbs )
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(dev, bio->bi_bdev->bd_dev)
-+ tp_assign(sector, bio->bi_sector)
-+ tp_assign(new_sector, new_sector)
-+ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
-+ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
-+ ),
-+
-+ TP_printk("%d,%d %s %llu / %llu [%s]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __print_rwbs_flags(__entry->rwbs),
-+ (unsigned long long)__entry->sector,
-+ (unsigned long long)__entry->new_sector,
-+ __entry->comm)
-+)
-+
-+/**
-+ * block_bio_remap - map request for a logical device to the raw device
-+ * @q: queue holding the operation
-+ * @bio: revised operation
-+ * @dev: device for the operation
-+ * @from: original sector for the operation
-+ *
-+ * An operation for a logical device has been mapped to the
-+ * raw block device.
-+ */
-+TRACE_EVENT(block_bio_remap,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
-+ sector_t from),
-+
-+ TP_ARGS(q, bio, dev, from),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( dev_t, old_dev )
-+ __field( sector_t, old_sector )
-+ __field( unsigned int, rwbs )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(dev, bio->bi_bdev->bd_dev)
-+ tp_assign(sector, bio->bi_sector)
-+ tp_assign(nr_sector, bio->bi_size >> 9)
-+ tp_assign(old_dev, dev)
-+ tp_assign(old_sector, from)
-+ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __print_rwbs_flags(__entry->rwbs),
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector,
-+ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
-+ (unsigned long long)__entry->old_sector)
-+)
-+
-+/**
-+ * block_rq_remap - map request for a block operation request
-+ * @q: queue holding the operation
-+ * @rq: block IO operation request
-+ * @dev: device for the operation
-+ * @from: original sector for the operation
-+ *
-+ * The block operation request @rq in @q has been remapped. The block
-+ * operation request @rq holds the current information and @from holds
-+ * the original sector.
-+ */
-+TRACE_EVENT(block_rq_remap,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
-+ sector_t from),
-+
-+ TP_ARGS(q, rq, dev, from),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( dev_t, old_dev )
-+ __field( sector_t, old_sector )
-+ __field( unsigned int, rwbs )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(dev, disk_devt(rq->rq_disk))
-+ tp_assign(sector, blk_rq_pos(rq))
-+ tp_assign(nr_sector, blk_rq_sectors(rq))
-+ tp_assign(old_dev, dev)
-+ tp_assign(old_sector, from)
-+ blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __print_rwbs_flags(__entry->rwbs),
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector,
-+ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
-+ (unsigned long long)__entry->old_sector)
-+)
-+
-+#undef __print_rwbs_flags
-+#undef blk_fill_rwbs
-+
-+#endif /* _TRACE_BLOCK_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
-+
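/*
 * Illustrative sketch, not part of the patch: each TRACE_EVENT() above
 * (e.g. block_bio_queue) expands to a trace_<name>() static inline that
 * instrumented code calls at the hook point; the probe built from this
 * header then serializes the TP_STRUCT__entry fields.  Hypothetical
 * caller, assuming the usual block-layer headers are already included:
 */
static inline void example_trace_bio_queue(struct request_queue *q,
					   struct bio *bio)
{
	/* Fires the block_bio_queue event declared above. */
	trace_block_bio_queue(q, bio);
}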
-diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/irq.h b/drivers/staging/lttng/instrumentation/events/lttng-module/irq.h
-new file mode 100644
-index 0000000..344015d
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/irq.h
-@@ -0,0 +1,155 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM irq
-+
-+#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_IRQ_H
-+
-+#include <linux/tracepoint.h>
-+
-+#ifndef _TRACE_IRQ_DEF_
-+#define _TRACE_IRQ_DEF_
-+
-+struct irqaction;
-+struct softirq_action;
-+
-+#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
-+#define show_softirq_name(val) \
-+ __print_symbolic(val, \
-+ softirq_name(HI), \
-+ softirq_name(TIMER), \
-+ softirq_name(NET_TX), \
-+ softirq_name(NET_RX), \
-+ softirq_name(BLOCK), \
-+ softirq_name(BLOCK_IOPOLL), \
-+ softirq_name(TASKLET), \
-+ softirq_name(SCHED), \
-+ softirq_name(HRTIMER), \
-+ softirq_name(RCU))
-+
-+#endif /* _TRACE_IRQ_DEF_ */
-+
-+/**
-+ * irq_handler_entry - called immediately before the irq action handler
-+ * @irq: irq number
-+ * @action: pointer to struct irqaction
-+ *
-+ * The struct irqaction pointed to by @action contains various
-+ * information about the handler, including the device name,
-+ * @action->name, and the device id, @action->dev_id. When used in
-+ * conjunction with the irq_handler_exit tracepoint, we can figure
-+ * out irq handler latencies.
-+ */
-+TRACE_EVENT(irq_handler_entry,
-+
-+ TP_PROTO(int irq, struct irqaction *action),
-+
-+ TP_ARGS(irq, action),
-+
-+ TP_STRUCT__entry(
-+ __field( int, irq )
-+ __string( name, action->name )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(irq, irq)
-+ tp_strcpy(name, action->name)
-+ ),
-+
-+ TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
-+)
-+
-+/**
-+ * irq_handler_exit - called immediately after the irq action handler returns
-+ * @irq: irq number
-+ * @action: pointer to struct irqaction
-+ * @ret: return value
-+ *
-+ * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
-+ * @action->handler successfully handled this irq. Otherwise, the irq might be
-+ * a shared irq line, or the irq was not handled successfully. Can be used in
-+ * conjunction with the irq_handler_entry to understand irq handler latencies.
-+ */
-+TRACE_EVENT(irq_handler_exit,
-+
-+ TP_PROTO(int irq, struct irqaction *action, int ret),
-+
-+ TP_ARGS(irq, action, ret),
-+
-+ TP_STRUCT__entry(
-+ __field( int, irq )
-+ __field( int, ret )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(irq, irq)
-+ tp_assign(ret, ret)
-+ ),
-+
-+ TP_printk("irq=%d ret=%s",
-+ __entry->irq, __entry->ret ? "handled" : "unhandled")
-+)
-+
-+DECLARE_EVENT_CLASS(softirq,
-+
-+ TP_PROTO(unsigned int vec_nr),
-+
-+ TP_ARGS(vec_nr),
-+
-+ TP_STRUCT__entry(
-+ __field( unsigned int, vec )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(vec, vec_nr)
-+ ),
-+
-+ TP_printk("vec=%u [action=%s]", __entry->vec,
-+ show_softirq_name(__entry->vec))
-+)
-+
-+/**
-+ * softirq_entry - called immediately before the softirq handler
-+ * @vec_nr: softirq vector number
-+ *
-+ * When used in combination with the softirq_exit tracepoint
-+ * we can determine the softirq handler runtime.
-+ */
-+DEFINE_EVENT(softirq, softirq_entry,
-+
-+ TP_PROTO(unsigned int vec_nr),
-+
-+ TP_ARGS(vec_nr)
-+)
-+
-+/**
-+ * softirq_exit - called immediately after the softirq handler returns
-+ * @vec_nr: softirq vector number
-+ *
-+ * When used in combination with the softirq_entry tracepoint
-+ * we can determine the softirq handler runtime.
-+ */
-+DEFINE_EVENT(softirq, softirq_exit,
-+
-+ TP_PROTO(unsigned int vec_nr),
-+
-+ TP_ARGS(vec_nr)
-+)
-+
-+/**
-+ * softirq_raise - called immediately when a softirq is raised
-+ * @vec_nr: softirq vector number
-+ *
-+ * When used in combination with the softirq_entry tracepoint
-+ * we can determine the softirq raise to run latency.
-+ */
-+DEFINE_EVENT(softirq, softirq_raise,
-+
-+ TP_PROTO(unsigned int vec_nr),
-+
-+ TP_ARGS(vec_nr)
-+)
-+
-+#endif /* _TRACE_IRQ_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
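/*
 * Illustrative sketch, not part of the patch: irq_handler_entry/exit are
 * intended to bracket the action handler so handler latency can be
 * measured, roughly as the generic IRQ code does.  Hypothetical caller:
 */
static irqreturn_t example_run_irq_action(int irq, struct irqaction *action)
{
	irqreturn_t res;

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);
	return res;
}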
-diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h b/drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h
-new file mode 100644
-index 0000000..e10455b
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h
-@@ -0,0 +1,312 @@
-+#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_KVM_MAIN_H
-+
-+#include <linux/tracepoint.h>
-+
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM kvm
-+
-+#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
-+
-+#define kvm_trace_exit_reason \
-+ ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
-+ ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
-+ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
-+ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
-+ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
-+
-+TRACE_EVENT(kvm_userspace_exit,
-+ TP_PROTO(__u32 reason, int errno),
-+ TP_ARGS(reason, errno),
-+
-+ TP_STRUCT__entry(
-+ __field( __u32, reason )
-+ __field( int, errno )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(reason, reason)
-+ tp_assign(errno, errno)
-+ ),
-+
-+ TP_printk("reason %s (%d)",
-+ __entry->errno < 0 ?
-+ (__entry->errno == -EINTR ? "restart" : "error") :
-+ __print_symbolic(__entry->reason, kvm_trace_exit_reason),
-+ __entry->errno < 0 ? -__entry->errno : __entry->reason)
-+)
-+
-+#if defined(__KVM_HAVE_IOAPIC)
-+TRACE_EVENT(kvm_set_irq,
-+ TP_PROTO(unsigned int gsi, int level, int irq_source_id),
-+ TP_ARGS(gsi, level, irq_source_id),
-+
-+ TP_STRUCT__entry(
-+ __field( unsigned int, gsi )
-+ __field( int, level )
-+ __field( int, irq_source_id )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(gsi, gsi)
-+ tp_assign(level, level)
-+ tp_assign(irq_source_id, irq_source_id)
-+ ),
-+
-+ TP_printk("gsi %u level %d source %d",
-+ __entry->gsi, __entry->level, __entry->irq_source_id)
-+)
-+
-+#define kvm_deliver_mode \
-+ {0x0, "Fixed"}, \
-+ {0x1, "LowPrio"}, \
-+ {0x2, "SMI"}, \
-+ {0x3, "Res3"}, \
-+ {0x4, "NMI"}, \
-+ {0x5, "INIT"}, \
-+ {0x6, "SIPI"}, \
-+ {0x7, "ExtINT"}
-+
-+TRACE_EVENT(kvm_ioapic_set_irq,
-+ TP_PROTO(__u64 e, int pin, bool coalesced),
-+ TP_ARGS(e, pin, coalesced),
-+
-+ TP_STRUCT__entry(
-+ __field( __u64, e )
-+ __field( int, pin )
-+ __field( bool, coalesced )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(e, e)
-+ tp_assign(pin, pin)
-+ tp_assign(coalesced, coalesced)
-+ ),
-+
-+ TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
-+ __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
-+ __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
-+ (__entry->e & (1<<11)) ? "logical" : "physical",
-+ (__entry->e & (1<<15)) ? "level" : "edge",
-+ (__entry->e & (1<<16)) ? "|masked" : "",
-+ __entry->coalesced ? " (coalesced)" : "")
-+)
-+
-+TRACE_EVENT(kvm_msi_set_irq,
-+ TP_PROTO(__u64 address, __u64 data),
-+ TP_ARGS(address, data),
-+
-+ TP_STRUCT__entry(
-+ __field( __u64, address )
-+ __field( __u64, data )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(address, address)
-+ tp_assign(data, data)
-+ ),
-+
-+ TP_printk("dst %u vec %x (%s|%s|%s%s)",
-+ (u8)(__entry->address >> 12), (u8)__entry->data,
-+ __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
-+ (__entry->address & (1<<2)) ? "logical" : "physical",
-+ (__entry->data & (1<<15)) ? "level" : "edge",
-+ (__entry->address & (1<<3)) ? "|rh" : "")
-+)
-+
-+#define kvm_irqchips \
-+ {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
-+ {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
-+ {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
-+
-+TRACE_EVENT(kvm_ack_irq,
-+ TP_PROTO(unsigned int irqchip, unsigned int pin),
-+ TP_ARGS(irqchip, pin),
-+
-+ TP_STRUCT__entry(
-+ __field( unsigned int, irqchip )
-+ __field( unsigned int, pin )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(irqchip, irqchip)
-+ tp_assign(pin, pin)
-+ ),
-+
-+ TP_printk("irqchip %s pin %u",
-+ __print_symbolic(__entry->irqchip, kvm_irqchips),
-+ __entry->pin)
-+)
-+
-+
-+
-+#endif /* defined(__KVM_HAVE_IOAPIC) */
-+
-+#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
-+#define KVM_TRACE_MMIO_READ 1
-+#define KVM_TRACE_MMIO_WRITE 2
-+
-+#define kvm_trace_symbol_mmio \
-+ { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
-+ { KVM_TRACE_MMIO_READ, "read" }, \
-+ { KVM_TRACE_MMIO_WRITE, "write" }
-+
-+TRACE_EVENT(kvm_mmio,
-+ TP_PROTO(int type, int len, u64 gpa, u64 val),
-+ TP_ARGS(type, len, gpa, val),
-+
-+ TP_STRUCT__entry(
-+ __field( u32, type )
-+ __field( u32, len )
-+ __field( u64, gpa )
-+ __field( u64, val )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(type, type)
-+ tp_assign(len, len)
-+ tp_assign(gpa, gpa)
-+ tp_assign(val, val)
-+ ),
-+
-+ TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
-+ __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
-+ __entry->len, __entry->gpa, __entry->val)
-+)
-+
-+#define kvm_fpu_load_symbol \
-+ {0, "unload"}, \
-+ {1, "load"}
-+
-+TRACE_EVENT(kvm_fpu,
-+ TP_PROTO(int load),
-+ TP_ARGS(load),
-+
-+ TP_STRUCT__entry(
-+ __field( u32, load )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(load, load)
-+ ),
-+
-+ TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
-+)
-+
-+TRACE_EVENT(kvm_age_page,
-+ TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
-+ TP_ARGS(hva, slot, ref),
-+
-+ TP_STRUCT__entry(
-+ __field( u64, hva )
-+ __field( u64, gfn )
-+ __field( u8, referenced )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(hva, hva)
-+ tp_assign(gfn,
-+ slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT))
-+ tp_assign(referenced, ref)
-+ ),
-+
-+ TP_printk("hva %llx gfn %llx %s",
-+ __entry->hva, __entry->gfn,
-+ __entry->referenced ? "YOUNG" : "OLD")
-+)
-+
-+#ifdef CONFIG_KVM_ASYNC_PF
-+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
-+
-+ TP_PROTO(u64 gva, u64 gfn),
-+
-+ TP_ARGS(gva, gfn),
-+
-+ TP_STRUCT__entry(
-+ __field(__u64, gva)
-+ __field(u64, gfn)
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(gva, gva)
-+ tp_assign(gfn, gfn)
-+ ),
-+
-+ TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
-+)
-+
-+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
-+
-+ TP_PROTO(u64 gva, u64 gfn),
-+
-+ TP_ARGS(gva, gfn)
-+)
-+
-+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
-+
-+ TP_PROTO(u64 gva, u64 gfn),
-+
-+ TP_ARGS(gva, gfn)
-+)
-+
-+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
-+
-+ TP_PROTO(u64 token, u64 gva),
-+
-+ TP_ARGS(token, gva),
-+
-+ TP_STRUCT__entry(
-+ __field(__u64, token)
-+ __field(__u64, gva)
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(token, token)
-+ tp_assign(gva, gva)
-+ ),
-+
-+ TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
-+
-+)
-+
-+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
-+
-+ TP_PROTO(u64 token, u64 gva),
-+
-+ TP_ARGS(token, gva)
-+)
-+
-+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
-+
-+ TP_PROTO(u64 token, u64 gva),
-+
-+ TP_ARGS(token, gva)
-+)
-+
-+TRACE_EVENT(
-+ kvm_async_pf_completed,
-+ TP_PROTO(unsigned long address, struct page *page, u64 gva),
-+ TP_ARGS(address, page, gva),
-+
-+ TP_STRUCT__entry(
-+ __field(unsigned long, address)
-+ __field(pfn_t, pfn)
-+ __field(u64, gva)
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(address, address)
-+ tp_assign(pfn, page ? page_to_pfn(page) : 0)
-+ tp_assign(gva, gva)
-+ ),
-+
-+ TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
-+ __entry->address, __entry->pfn)
-+)
-+
-+#endif
-+
-+#endif /* _TRACE_KVM_MAIN_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
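/*
 * Illustrative sketch, not part of the patch: kvm_mmio takes one of the
 * KVM_TRACE_MMIO_* codes defined above, followed by length, guest physical
 * address and value.  Hypothetical call site for an emulated MMIO write:
 */
static inline void example_trace_mmio_write(int len, u64 gpa, u64 val)
{
	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, gpa, val);
}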
-diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h b/drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h
-new file mode 100644
-index 0000000..6f3d6d1
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/lttng.h
-@@ -0,0 +1,34 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM lttng
-+
-+#if !defined(_TRACE_LTTNG_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_LTTNG_H
-+
-+#include <linux/tracepoint.h>
-+
-+TRACE_EVENT(lttng_metadata,
-+
-+ TP_PROTO(const char *str),
-+
-+ TP_ARGS(str),
-+
-+ /*
-+ * Not exactly a string: more a sequence of bytes (dynamic
-+ * array) without the length. This is a dummy anyway: we only
-+ * use this declaration to generate an event metadata entry.
-+ */
-+ TP_STRUCT__entry(
-+ __string( str, str )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_strcpy(str, str)
-+ ),
-+
-+ TP_printk("")
-+)
-+
-+#endif /* _TRACE_LTTNG_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
-diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/sched.h b/drivers/staging/lttng/instrumentation/events/lttng-module/sched.h
-new file mode 100644
-index 0000000..33f6921
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/sched.h
-@@ -0,0 +1,400 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM sched
-+
-+#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_SCHED_H
-+
-+#include <linux/sched.h>
-+#include <linux/tracepoint.h>
-+
-+#ifndef _TRACE_SCHED_DEF_
-+#define _TRACE_SCHED_DEF_
-+
-+static inline long __trace_sched_switch_state(struct task_struct *p)
-+{
-+ long state = p->state;
-+
-+#ifdef CONFIG_PREEMPT
-+ /*
-+ * For all intents and purposes a preempted task is a running task.
-+ */
-+ if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
-+ state = TASK_RUNNING;
-+#endif
-+
-+ return state;
-+}
-+
-+#endif /* _TRACE_SCHED_DEF_ */
-+
-+/*
-+ * Tracepoint for calling kthread_stop, performed to end a kthread:
-+ */
-+TRACE_EVENT(sched_kthread_stop,
-+
-+ TP_PROTO(struct task_struct *t),
-+
-+ TP_ARGS(t),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, tid )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(comm, t->comm, TASK_COMM_LEN)
-+ tp_assign(tid, t->pid)
-+ ),
-+
-+ TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
-+)
-+
-+/*
-+ * Tracepoint for the return value of the kthread stopping:
-+ */
-+TRACE_EVENT(sched_kthread_stop_ret,
-+
-+ TP_PROTO(int ret),
-+
-+ TP_ARGS(ret),
-+
-+ TP_STRUCT__entry(
-+ __field( int, ret )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(ret, ret)
-+ ),
-+
-+ TP_printk("ret=%d", __entry->ret)
-+)
-+
-+/*
-+ * Tracepoint for waking up a task:
-+ */
-+DECLARE_EVENT_CLASS(sched_wakeup_template,
-+
-+ TP_PROTO(struct task_struct *p, int success),
-+
-+ TP_ARGS(p, success),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, tid )
-+ __field( int, prio )
-+ __field( int, success )
-+ __field( int, target_cpu )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(comm, p->comm, TASK_COMM_LEN)
-+ tp_assign(tid, p->pid)
-+ tp_assign(prio, p->prio)
-+ tp_assign(success, success)
-+ tp_assign(target_cpu, task_cpu(p))
-+ ),
-+
-+ TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
-+ __entry->comm, __entry->tid, __entry->prio,
-+ __entry->success, __entry->target_cpu)
-+)
-+
-+DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
-+ TP_PROTO(struct task_struct *p, int success),
-+ TP_ARGS(p, success))
-+
-+/*
-+ * Tracepoint for waking up a new task:
-+ */
-+DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
-+ TP_PROTO(struct task_struct *p, int success),
-+ TP_ARGS(p, success))
-+
-+/*
-+ * Tracepoint for task switches, performed by the scheduler:
-+ */
-+TRACE_EVENT(sched_switch,
-+
-+ TP_PROTO(struct task_struct *prev,
-+ struct task_struct *next),
-+
-+ TP_ARGS(prev, next),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, prev_comm, TASK_COMM_LEN )
-+ __field( pid_t, prev_tid )
-+ __field( int, prev_prio )
-+ __field( long, prev_state )
-+ __array_text( char, next_comm, TASK_COMM_LEN )
-+ __field( pid_t, next_tid )
-+ __field( int, next_prio )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
-+ tp_assign(prev_tid, prev->pid)
-+ tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
-+ tp_assign(prev_state, __trace_sched_switch_state(prev))
-+ tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
-+ tp_assign(next_tid, next->pid)
-+ tp_assign(next_prio, next->prio - MAX_RT_PRIO)
-+ ),
-+
-+ TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
-+ __entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
-+ __entry->prev_state ?
-+ __print_flags(__entry->prev_state, "|",
-+ { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
-+ { 16, "Z" }, { 32, "X" }, { 64, "x" },
-+ { 128, "W" }) : "R",
-+ __entry->next_comm, __entry->next_tid, __entry->next_prio)
-+)
-+
-+/*
-+ * Tracepoint for a task being migrated:
-+ */
-+TRACE_EVENT(sched_migrate_task,
-+
-+ TP_PROTO(struct task_struct *p, int dest_cpu),
-+
-+ TP_ARGS(p, dest_cpu),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, tid )
-+ __field( int, prio )
-+ __field( int, orig_cpu )
-+ __field( int, dest_cpu )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(comm, p->comm, TASK_COMM_LEN)
-+ tp_assign(tid, p->pid)
-+ tp_assign(prio, p->prio - MAX_RT_PRIO)
-+ tp_assign(orig_cpu, task_cpu(p))
-+ tp_assign(dest_cpu, dest_cpu)
-+ ),
-+
-+ TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
-+ __entry->comm, __entry->tid, __entry->prio,
-+ __entry->orig_cpu, __entry->dest_cpu)
-+)
-+
-+DECLARE_EVENT_CLASS(sched_process_template,
-+
-+ TP_PROTO(struct task_struct *p),
-+
-+ TP_ARGS(p),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, tid )
-+ __field( int, prio )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(comm, p->comm, TASK_COMM_LEN)
-+ tp_assign(tid, p->pid)
-+ tp_assign(prio, p->prio - MAX_RT_PRIO)
-+ ),
-+
-+ TP_printk("comm=%s tid=%d prio=%d",
-+ __entry->comm, __entry->tid, __entry->prio)
-+)
-+
-+/*
-+ * Tracepoint for freeing a task:
-+ */
-+DEFINE_EVENT(sched_process_template, sched_process_free,
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p))
-+
-+
-+/*
-+ * Tracepoint for a task exiting:
-+ */
-+DEFINE_EVENT(sched_process_template, sched_process_exit,
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p))
-+
-+/*
-+ * Tracepoint for waiting on task to unschedule:
-+ */
-+DEFINE_EVENT(sched_process_template, sched_wait_task,
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p))
-+
-+/*
-+ * Tracepoint for a waiting task:
-+ */
-+TRACE_EVENT(sched_process_wait,
-+
-+ TP_PROTO(struct pid *pid),
-+
-+ TP_ARGS(pid),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, tid )
-+ __field( int, prio )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
-+ tp_assign(tid, pid_nr(pid))
-+ tp_assign(prio, current->prio - MAX_RT_PRIO)
-+ ),
-+
-+ TP_printk("comm=%s tid=%d prio=%d",
-+ __entry->comm, __entry->tid, __entry->prio)
-+)
-+
-+/*
-+ * Tracepoint for do_fork:
-+ */
-+TRACE_EVENT(sched_process_fork,
-+
-+ TP_PROTO(struct task_struct *parent, struct task_struct *child),
-+
-+ TP_ARGS(parent, child),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, parent_comm, TASK_COMM_LEN )
-+ __field( pid_t, parent_tid )
-+ __array_text( char, child_comm, TASK_COMM_LEN )
-+ __field( pid_t, child_tid )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
-+ tp_assign(parent_tid, parent->pid)
-+ tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
-+ tp_assign(child_tid, child->pid)
-+ ),
-+
-+ TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
-+ __entry->parent_comm, __entry->parent_tid,
-+ __entry->child_comm, __entry->child_tid)
-+)
-+
-+/*
-+ * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
-+ * adding sched_stat support to SCHED_FIFO/RR would be welcome.
-+ */
-+DECLARE_EVENT_CLASS(sched_stat_template,
-+
-+ TP_PROTO(struct task_struct *tsk, u64 delay),
-+
-+ TP_ARGS(tsk, delay),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, tid )
-+ __field( u64, delay )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
-+ tp_assign(tid, tsk->pid)
-+ tp_assign(delay, delay)
-+ )
-+ TP_perf_assign(
-+ __perf_count(delay)
-+ ),
-+
-+ TP_printk("comm=%s tid=%d delay=%Lu [ns]",
-+ __entry->comm, __entry->tid,
-+ (unsigned long long)__entry->delay)
-+)
-+
-+
-+/*
-+ * Tracepoint for accounting wait time (time the task is runnable
-+ * but not actually running due to scheduler contention).
-+ */
-+DEFINE_EVENT(sched_stat_template, sched_stat_wait,
-+ TP_PROTO(struct task_struct *tsk, u64 delay),
-+ TP_ARGS(tsk, delay))
-+
-+/*
-+ * Tracepoint for accounting sleep time (time the task is not runnable,
-+ * including iowait, see below).
-+ */
-+DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
-+ TP_PROTO(struct task_struct *tsk, u64 delay),
-+ TP_ARGS(tsk, delay))
-+
-+/*
-+ * Tracepoint for accounting iowait time (time the task is not runnable
-+ * due to waiting on IO to complete).
-+ */
-+DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
-+ TP_PROTO(struct task_struct *tsk, u64 delay),
-+ TP_ARGS(tsk, delay))
-+
-+/*
-+ * Tracepoint for accounting runtime (time the task is executing
-+ * on a CPU).
-+ */
-+TRACE_EVENT(sched_stat_runtime,
-+
-+ TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
-+
-+ TP_ARGS(tsk, runtime, vruntime),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, tid )
-+ __field( u64, runtime )
-+ __field( u64, vruntime )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
-+ tp_assign(tid, tsk->pid)
-+ tp_assign(runtime, runtime)
-+ tp_assign(vruntime, vruntime)
-+ )
-+ TP_perf_assign(
-+ __perf_count(runtime)
-+ ),
-+
-+ TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
-+ __entry->comm, __entry->tid,
-+ (unsigned long long)__entry->runtime,
-+ (unsigned long long)__entry->vruntime)
-+)
-+
-+/*
-+ * Tracepoint for showing priority inheritance modifying a task's
-+ * priority.
-+ */
-+TRACE_EVENT(sched_pi_setprio,
-+
-+ TP_PROTO(struct task_struct *tsk, int newprio),
-+
-+ TP_ARGS(tsk, newprio),
-+
-+ TP_STRUCT__entry(
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, tid )
-+ __field( int, oldprio )
-+ __field( int, newprio )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
-+ tp_assign(tid, tsk->pid)
-+ tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
-+ tp_assign(newprio, newprio - MAX_RT_PRIO)
-+ ),
-+
-+ TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
-+ __entry->comm, __entry->tid,
-+ __entry->oldprio, __entry->newprio)
-+)
-+
-+#endif /* _TRACE_SCHED_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
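/*
 * Illustrative sketch, not part of the patch: sched_switch is emitted when
 * one task is switched out for another; prev_state goes through
 * __trace_sched_switch_state() above so that a preempted task still
 * reports as TASK_RUNNING.  Hypothetical call site in the context-switch
 * path:
 */
static inline void example_trace_switch(struct task_struct *prev,
					struct task_struct *next)
{
	trace_sched_switch(prev, next);
}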
-diff --git a/drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h b/drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h
-new file mode 100644
-index 0000000..a2bb956
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h
-@@ -0,0 +1,76 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM raw_syscalls
-+#define TRACE_INCLUDE_FILE syscalls
-+
-+#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_EVENTS_SYSCALLS_H
-+
-+#include <linux/tracepoint.h>
-+
-+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
-+
-+#ifndef _TRACE_SYSCALLS_DEF_
-+#define _TRACE_SYSCALLS_DEF_
-+
-+#include <asm/ptrace.h>
-+#include <asm/syscall.h>
-+
-+#endif /* _TRACE_SYSCALLS_DEF_ */
-+
-+TRACE_EVENT(sys_enter,
-+
-+ TP_PROTO(struct pt_regs *regs, long id),
-+
-+ TP_ARGS(regs, id),
-+
-+ TP_STRUCT__entry(
-+ __field( long, id )
-+ __array( unsigned long, args, 6 )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(id, id)
-+ {
-+ tp_memcpy(args,
-+ ({
-+ unsigned long args_copy[6];
-+ syscall_get_arguments(current, regs,
-+ 0, 6, args_copy);
-+ args_copy;
-+ }), 6 * sizeof(unsigned long));
-+ }
-+ ),
-+
-+ TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
-+ __entry->id,
-+ __entry->args[0], __entry->args[1], __entry->args[2],
-+ __entry->args[3], __entry->args[4], __entry->args[5])
-+)
-+
-+TRACE_EVENT(sys_exit,
-+
-+ TP_PROTO(struct pt_regs *regs, long ret),
-+
-+ TP_ARGS(regs, ret),
-+
-+ TP_STRUCT__entry(
-+ __field( long, id )
-+ __field( long, ret )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(id, syscall_get_nr(current, regs))
-+ tp_assign(ret, ret)
-+ ),
-+
-+ TP_printk("NR %ld = %ld",
-+ __entry->id, __entry->ret)
-+)
-+
-+#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
-+
-+#endif /* _TRACE_EVENTS_SYSCALLS_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
-+
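/*
 * Illustrative note, not part of the patch: the statement expression in
 * sys_enter's TP_fast_assign above is just a way to copy the six syscall
 * arguments into a temporary buffer before they are memcpy'd into the
 * event, roughly equivalent to:
 */
static inline void example_copy_syscall_args(struct pt_regs *regs,
					     unsigned long args_copy[6])
{
	syscall_get_arguments(current, regs, 0, 6, args_copy);
}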
-diff --git a/drivers/staging/lttng/instrumentation/events/mainline/block.h b/drivers/staging/lttng/instrumentation/events/mainline/block.h
-new file mode 100644
-index 0000000..bf36654
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/mainline/block.h
-@@ -0,0 +1,569 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM block
-+
-+#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_BLOCK_H
-+
-+#include <linux/blktrace_api.h>
-+#include <linux/blkdev.h>
-+#include <linux/tracepoint.h>
-+
-+DECLARE_EVENT_CLASS(block_rq_with_error,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( int, errors )
-+ __array( char, rwbs, 6 )
-+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-+ __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ 0 : blk_rq_pos(rq);
-+ __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ 0 : blk_rq_sectors(rq);
-+ __entry->errors = rq->errors;
-+
-+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
-+ blk_dump_cmd(__get_str(cmd), rq);
-+ ),
-+
-+ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __entry->rwbs, __get_str(cmd),
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->errors)
-+);
-+
-+/**
-+ * block_rq_abort - abort block operation request
-+ * @q: queue containing the block operation request
-+ * @rq: block IO operation request
-+ *
-+ * Called immediately after pending block IO operation request @rq in
-+ * queue @q is aborted. The fields in the operation request @rq
-+ * can be examined to determine which device and sectors the pending
-+ * operation would access.
-+ */
-+DEFINE_EVENT(block_rq_with_error, block_rq_abort,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq)
-+);
-+
-+/**
-+ * block_rq_requeue - place block IO request back on a queue
-+ * @q: queue holding operation
-+ * @rq: block IO operation request
-+ *
-+ * The block operation request @rq is being placed back into queue
-+ * @q. For some reason the request was not completed and needs to be
-+ * put back in the queue.
-+ */
-+DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq)
-+);
-+
-+/**
-+ * block_rq_complete - block IO operation completed by device driver
-+ * @q: queue containing the block operation request
-+ * @rq: block operations request
-+ *
-+ * The block_rq_complete tracepoint event indicates that some portion
-+ * of the operation request has been completed by the device driver. If
-+ * the @rq->bio is %NULL, then there is absolutely no additional work to
-+ * do for the request. If @rq->bio is non-NULL then there is
-+ * additional work required to complete the request.
-+ */
-+DEFINE_EVENT(block_rq_with_error, block_rq_complete,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq)
-+);
-+
-+DECLARE_EVENT_CLASS(block_rq,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( unsigned int, bytes )
-+ __array( char, rwbs, 6 )
-+ __array( char, comm, TASK_COMM_LEN )
-+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-+ __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ 0 : blk_rq_pos(rq);
-+ __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ 0 : blk_rq_sectors(rq);
-+ __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
-+ blk_rq_bytes(rq) : 0;
-+
-+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
-+ blk_dump_cmd(__get_str(cmd), rq);
-+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-+ ),
-+
-+ TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev),
-+ __entry->rwbs, __entry->bytes, __get_str(cmd),
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->comm)
-+);
-+
-+/**
-+ * block_rq_insert - insert block operation request into queue
-+ * @q: target queue
-+ * @rq: block IO operation request
-+ *
-+ * Called immediately before block operation request @rq is inserted
-+ * into queue @q. The fields in the operation request @rq struct can
-+ * be examined to determine which device and sectors the pending
-+ * operation would access.
-+ */
-+DEFINE_EVENT(block_rq, block_rq_insert,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq)
-+);
-+
-+/**
-+ * block_rq_issue - issue pending block IO request operation to device driver
-+ * @q: queue holding operation
-+ * @rq: block IO operation request
-+ *
-+ * Called when block operation request @rq from queue @q is sent to a
-+ * device driver for processing.
-+ */
-+DEFINE_EVENT(block_rq, block_rq_issue,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq),
-+
-+ TP_ARGS(q, rq)
-+);
-+
-+/**
-+ * block_bio_bounce - used bounce buffer when processing block operation
-+ * @q: queue holding the block operation
-+ * @bio: block operation
-+ *
-+ * A bounce buffer was used to handle the block operation @bio in @q.
-+ * This occurs when hardware limitations prevent a direct transfer of
-+ * data between the @bio data memory area and the IO device. Use of a
-+ * bounce buffer requires extra copying of data and decreases
-+ * performance.
-+ */
-+TRACE_EVENT(block_bio_bounce,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio),
-+
-+ TP_ARGS(q, bio),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __array( char, rwbs, 6 )
-+ __array( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->dev = bio->bi_bdev ?
-+ bio->bi_bdev->bd_dev : 0;
-+ __entry->sector = bio->bi_sector;
-+ __entry->nr_sector = bio->bi_size >> 9;
-+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u [%s]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->comm)
-+);
-+
-+/**
-+ * block_bio_complete - completed all work on the block operation
-+ * @q: queue holding the block operation
-+ * @bio: block operation completed
-+ * @error: io error value
-+ *
-+ * This tracepoint indicates there is no further work to do on this
-+ * block IO operation @bio.
-+ */
-+TRACE_EVENT(block_bio_complete,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio, int error),
-+
-+ TP_ARGS(q, bio, error),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned, nr_sector )
-+ __field( int, error )
-+ __array( char, rwbs, 6 )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->dev = bio->bi_bdev->bd_dev;
-+ __entry->sector = bio->bi_sector;
-+ __entry->nr_sector = bio->bi_size >> 9;
-+ __entry->error = error;
-+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u [%d]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->error)
-+);
-+
-+DECLARE_EVENT_CLASS(block_bio,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio),
-+
-+ TP_ARGS(q, bio),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __array( char, rwbs, 6 )
-+ __array( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->dev = bio->bi_bdev->bd_dev;
-+ __entry->sector = bio->bi_sector;
-+ __entry->nr_sector = bio->bi_size >> 9;
-+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u [%s]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->comm)
-+);
-+
-+/**
-+ * block_bio_backmerge - merging block operation to the end of an existing operation
-+ * @q: queue holding operation
-+ * @bio: new block operation to merge
-+ *
-+ * Merging block request @bio to the end of an existing block request
-+ * in queue @q.
-+ */
-+DEFINE_EVENT(block_bio, block_bio_backmerge,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio),
-+
-+ TP_ARGS(q, bio)
-+);
-+
-+/**
-+ * block_bio_frontmerge - merging block operation to the beginning of an existing operation
-+ * @q: queue holding operation
-+ * @bio: new block operation to merge
-+ *
-+ * Merging block IO operation @bio to the beginning of an existing block
-+ * operation in queue @q.
-+ */
-+DEFINE_EVENT(block_bio, block_bio_frontmerge,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio),
-+
-+ TP_ARGS(q, bio)
-+);
-+
-+/**
-+ * block_bio_queue - putting new block IO operation in queue
-+ * @q: queue holding operation
-+ * @bio: new block operation
-+ *
-+ * About to place the block IO operation @bio into queue @q.
-+ */
-+DEFINE_EVENT(block_bio, block_bio_queue,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio),
-+
-+ TP_ARGS(q, bio)
-+);
-+
-+DECLARE_EVENT_CLASS(block_get_rq,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-+
-+ TP_ARGS(q, bio, rw),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __array( char, rwbs, 6 )
-+ __array( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
-+ __entry->sector = bio ? bio->bi_sector : 0;
-+ __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
-+ blk_fill_rwbs(__entry->rwbs,
-+ bio ? bio->bi_rw : 0, __entry->nr_sector);
-+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u [%s]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector, __entry->comm)
-+);
-+
-+/**
-+ * block_getrq - get a free request entry in queue for block IO operations
-+ * @q: queue for operations
-+ * @bio: pending block IO operation
-+ * @rw: low bit indicates a read (%0) or a write (%1)
-+ *
-+ * A request struct for queue @q has been allocated to handle the
-+ * block IO operation @bio.
-+ */
-+DEFINE_EVENT(block_get_rq, block_getrq,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-+
-+ TP_ARGS(q, bio, rw)
-+);
-+
-+/**
-+ * block_sleeprq - waiting to get a free request entry in queue for block IO operation
-+ * @q: queue for operation
-+ * @bio: pending block IO operation
-+ * @rw: low bit indicates a read (%0) or a write (%1)
-+ *
-+ * In the case where a request struct cannot be provided for queue @q
-+ * the process needs to wait for a request struct to become
-+ * available. This tracepoint event is generated each time the
-+ * process goes to sleep waiting for a request struct to become available.
-+ */
-+DEFINE_EVENT(block_get_rq, block_sleeprq,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
-+
-+ TP_ARGS(q, bio, rw)
-+);
-+
-+/**
-+ * block_plug - keep operation requests in request queue
-+ * @q: request queue to plug
-+ *
-+ * Plug the request queue @q. Do not allow block operation requests
-+ * to be sent to the device driver. Instead, accumulate requests in
-+ * the queue to improve throughput performance of the block device.
-+ */
-+TRACE_EVENT(block_plug,
-+
-+ TP_PROTO(struct request_queue *q),
-+
-+ TP_ARGS(q),
-+
-+ TP_STRUCT__entry(
-+ __array( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-+ ),
-+
-+ TP_printk("[%s]", __entry->comm)
-+);
-+
-+DECLARE_EVENT_CLASS(block_unplug,
-+
-+ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
-+
-+ TP_ARGS(q, depth, explicit),
-+
-+ TP_STRUCT__entry(
-+ __field( int, nr_rq )
-+ __array( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->nr_rq = depth;
-+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-+ ),
-+
-+ TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
-+);
-+
-+/**
-+ * block_unplug - release of operation requests in request queue
-+ * @q: request queue to unplug
-+ * @depth: number of requests just added to the queue
-+ * @explicit: whether this was an explicit unplug, or one from schedule()
-+ *
-+ * Unplug request queue @q because device driver is scheduled to work
-+ * on elements in the request queue.
-+ */
-+DEFINE_EVENT(block_unplug, block_unplug,
-+
-+ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
-+
-+ TP_ARGS(q, depth, explicit)
-+);
-+
-+/**
-+ * block_split - split a single bio struct into two bio structs
-+ * @q: queue containing the bio
-+ * @bio: block operation being split
-+ * @new_sector: The starting sector for the new bio
-+ *
-+ * The bio request @bio in request queue @q needs to be split into two
-+ * bio requests. The newly created @bio request starts at
-+ * @new_sector. This split may be required due to hardware limitations
-+ * such as operations crossing device boundaries in a RAID system.
-+ */
-+TRACE_EVENT(block_split,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio,
-+ unsigned int new_sector),
-+
-+ TP_ARGS(q, bio, new_sector),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( sector_t, new_sector )
-+ __array( char, rwbs, 6 )
-+ __array( char, comm, TASK_COMM_LEN )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->dev = bio->bi_bdev->bd_dev;
-+ __entry->sector = bio->bi_sector;
-+ __entry->new_sector = new_sector;
-+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-+ ),
-+
-+ TP_printk("%d,%d %s %llu / %llu [%s]",
-+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
-+ (unsigned long long)__entry->sector,
-+ (unsigned long long)__entry->new_sector,
-+ __entry->comm)
-+);
-+
-+/**
-+ * block_bio_remap - map request for a logical device to the raw device
-+ * @q: queue holding the operation
-+ * @bio: revised operation
-+ * @dev: device for the operation
-+ * @from: original sector for the operation
-+ *
-+ * An operation for a logical device has been mapped to the
-+ * raw block device.
-+ */
-+TRACE_EVENT(block_bio_remap,
-+
-+ TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
-+ sector_t from),
-+
-+ TP_ARGS(q, bio, dev, from),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( dev_t, old_dev )
-+ __field( sector_t, old_sector )
-+ __array( char, rwbs, 6 )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->dev = bio->bi_bdev->bd_dev;
-+ __entry->sector = bio->bi_sector;
-+ __entry->nr_sector = bio->bi_size >> 9;
-+ __entry->old_dev = dev;
-+ __entry->old_sector = from;
-+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
-+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector,
-+ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
-+ (unsigned long long)__entry->old_sector)
-+);
-+
-+/**
-+ * block_rq_remap - map request for a block operation request
-+ * @q: queue holding the operation
-+ * @rq: block IO operation request
-+ * @dev: device for the operation
-+ * @from: original sector for the operation
-+ *
-+ * The block operation request @rq in @q has been remapped. The block
-+ * operation request @rq holds the current information and @from holds
-+ * the original sector.
-+ */
-+TRACE_EVENT(block_rq_remap,
-+
-+ TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
-+ sector_t from),
-+
-+ TP_ARGS(q, rq, dev, from),
-+
-+ TP_STRUCT__entry(
-+ __field( dev_t, dev )
-+ __field( sector_t, sector )
-+ __field( unsigned int, nr_sector )
-+ __field( dev_t, old_dev )
-+ __field( sector_t, old_sector )
-+ __array( char, rwbs, 6 )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->dev = disk_devt(rq->rq_disk);
-+ __entry->sector = blk_rq_pos(rq);
-+ __entry->nr_sector = blk_rq_sectors(rq);
-+ __entry->old_dev = dev;
-+ __entry->old_sector = from;
-+ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
-+ ),
-+
-+ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
-+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
-+ (unsigned long long)__entry->sector,
-+ __entry->nr_sector,
-+ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
-+ (unsigned long long)__entry->old_sector)
-+);
-+
-+#endif /* _TRACE_BLOCK_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
-+
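/*
 * Illustrative comparison, not part of the patch: the mainline header above
 * fills the event with direct assignments in TP_fast_assign(), while the
 * lttng-module copy earlier in this patch expresses the same field with
 * tp_assign()/tp_memcpy() so that one event description can drive LTTng's
 * own serializer.  Same field, two styles:
 *
 *   mainline:      __entry->dev = bio->bi_bdev->bd_dev;
 *   lttng-module:  tp_assign(dev, bio->bi_bdev->bd_dev)
 */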
-diff --git a/drivers/staging/lttng/instrumentation/events/mainline/irq.h b/drivers/staging/lttng/instrumentation/events/mainline/irq.h
-new file mode 100644
-index 0000000..1c09820
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/mainline/irq.h
-@@ -0,0 +1,150 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM irq
-+
-+#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_IRQ_H
-+
-+#include <linux/tracepoint.h>
-+
-+struct irqaction;
-+struct softirq_action;
-+
-+#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
-+#define show_softirq_name(val) \
-+ __print_symbolic(val, \
-+ softirq_name(HI), \
-+ softirq_name(TIMER), \
-+ softirq_name(NET_TX), \
-+ softirq_name(NET_RX), \
-+ softirq_name(BLOCK), \
-+ softirq_name(BLOCK_IOPOLL), \
-+ softirq_name(TASKLET), \
-+ softirq_name(SCHED), \
-+ softirq_name(HRTIMER), \
-+ softirq_name(RCU))
-+
-+/**
-+ * irq_handler_entry - called immediately before the irq action handler
-+ * @irq: irq number
-+ * @action: pointer to struct irqaction
-+ *
-+ * The struct irqaction pointed to by @action contains various
-+ * information about the handler, including the device name,
-+ * @action->name, and the device id, @action->dev_id. When used in
-+ * conjunction with the irq_handler_exit tracepoint, we can figure
-+ * out irq handler latencies.
-+ */
-+TRACE_EVENT(irq_handler_entry,
-+
-+ TP_PROTO(int irq, struct irqaction *action),
-+
-+ TP_ARGS(irq, action),
-+
-+ TP_STRUCT__entry(
-+ __field( int, irq )
-+ __string( name, action->name )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->irq = irq;
-+ __assign_str(name, action->name);
-+ ),
-+
-+ TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
-+);
-+
-+/**
-+ * irq_handler_exit - called immediately after the irq action handler returns
-+ * @irq: irq number
-+ * @action: pointer to struct irqaction
-+ * @ret: return value
-+ *
-+ * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
-+ * @action->handler successfully handled this irq. Otherwise, the irq might be
-+ * a shared irq line, or the irq was not handled successfully. Can be used in
-+ * conjunction with the irq_handler_entry to understand irq handler latencies.
-+ */
-+TRACE_EVENT(irq_handler_exit,
-+
-+ TP_PROTO(int irq, struct irqaction *action, int ret),
-+
-+ TP_ARGS(irq, action, ret),
-+
-+ TP_STRUCT__entry(
-+ __field( int, irq )
-+ __field( int, ret )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->irq = irq;
-+ __entry->ret = ret;
-+ ),
-+
-+ TP_printk("irq=%d ret=%s",
-+ __entry->irq, __entry->ret ? "handled" : "unhandled")
-+);
-+
-+DECLARE_EVENT_CLASS(softirq,
-+
-+ TP_PROTO(unsigned int vec_nr),
-+
-+ TP_ARGS(vec_nr),
-+
-+ TP_STRUCT__entry(
-+ __field( unsigned int, vec )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->vec = vec_nr;
-+ ),
-+
-+ TP_printk("vec=%u [action=%s]", __entry->vec,
-+ show_softirq_name(__entry->vec))
-+);
-+
-+/**
-+ * softirq_entry - called immediately before the softirq handler
-+ * @vec_nr: softirq vector number
-+ *
-+ * When used in combination with the softirq_exit tracepoint
-+ * we can determine the softirq handler runtime.
-+ */
-+DEFINE_EVENT(softirq, softirq_entry,
-+
-+ TP_PROTO(unsigned int vec_nr),
-+
-+ TP_ARGS(vec_nr)
-+);
-+
-+/**
-+ * softirq_exit - called immediately after the softirq handler returns
-+ * @vec_nr: softirq vector number
-+ *
-+ * When used in combination with the softirq_entry tracepoint
-+ * we can determine the softirq handler runtime.
-+ */
-+DEFINE_EVENT(softirq, softirq_exit,
-+
-+ TP_PROTO(unsigned int vec_nr),
-+
-+ TP_ARGS(vec_nr)
-+);
-+
-+/**
-+ * softirq_raise - called immediately when a softirq is raised
-+ * @vec_nr: softirq vector number
-+ *
-+ * When used in combination with the softirq_entry tracepoint
-+ * we can determine the softirq raise to run latency.
-+ */
-+DEFINE_EVENT(softirq, softirq_raise,
-+
-+ TP_PROTO(unsigned int vec_nr),
-+
-+ TP_ARGS(vec_nr)
-+);
-+
-+#endif /* _TRACE_IRQ_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
-diff --git a/drivers/staging/lttng/instrumentation/events/mainline/kvm.h b/drivers/staging/lttng/instrumentation/events/mainline/kvm.h
-new file mode 100644
-index 0000000..46e3cd8
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/mainline/kvm.h
-@@ -0,0 +1,312 @@
-+#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_KVM_MAIN_H
-+
-+#include <linux/tracepoint.h>
-+
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM kvm
-+
-+#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
-+
-+#define kvm_trace_exit_reason \
-+ ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
-+ ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
-+ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
-+ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
-+ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
-+
-+TRACE_EVENT(kvm_userspace_exit,
-+ TP_PROTO(__u32 reason, int errno),
-+ TP_ARGS(reason, errno),
-+
-+ TP_STRUCT__entry(
-+ __field( __u32, reason )
-+ __field( int, errno )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->reason = reason;
-+ __entry->errno = errno;
-+ ),
-+
-+ TP_printk("reason %s (%d)",
-+ __entry->errno < 0 ?
-+ (__entry->errno == -EINTR ? "restart" : "error") :
-+ __print_symbolic(__entry->reason, kvm_trace_exit_reason),
-+ __entry->errno < 0 ? -__entry->errno : __entry->reason)
-+);
-+
-+#if defined(__KVM_HAVE_IOAPIC)
-+TRACE_EVENT(kvm_set_irq,
-+ TP_PROTO(unsigned int gsi, int level, int irq_source_id),
-+ TP_ARGS(gsi, level, irq_source_id),
-+
-+ TP_STRUCT__entry(
-+ __field( unsigned int, gsi )
-+ __field( int, level )
-+ __field( int, irq_source_id )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->gsi = gsi;
-+ __entry->level = level;
-+ __entry->irq_source_id = irq_source_id;
-+ ),
-+
-+ TP_printk("gsi %u level %d source %d",
-+ __entry->gsi, __entry->level, __entry->irq_source_id)
-+);
-+
-+#define kvm_deliver_mode \
-+ {0x0, "Fixed"}, \
-+ {0x1, "LowPrio"}, \
-+ {0x2, "SMI"}, \
-+ {0x3, "Res3"}, \
-+ {0x4, "NMI"}, \
-+ {0x5, "INIT"}, \
-+ {0x6, "SIPI"}, \
-+ {0x7, "ExtINT"}
-+
-+TRACE_EVENT(kvm_ioapic_set_irq,
-+ TP_PROTO(__u64 e, int pin, bool coalesced),
-+ TP_ARGS(e, pin, coalesced),
-+
-+ TP_STRUCT__entry(
-+ __field( __u64, e )
-+ __field( int, pin )
-+ __field( bool, coalesced )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->e = e;
-+ __entry->pin = pin;
-+ __entry->coalesced = coalesced;
-+ ),
-+
-+ TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
-+ __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
-+ __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
-+ (__entry->e & (1<<11)) ? "logical" : "physical",
-+ (__entry->e & (1<<15)) ? "level" : "edge",
-+ (__entry->e & (1<<16)) ? "|masked" : "",
-+ __entry->coalesced ? " (coalesced)" : "")
-+);
-+
-+TRACE_EVENT(kvm_msi_set_irq,
-+ TP_PROTO(__u64 address, __u64 data),
-+ TP_ARGS(address, data),
-+
-+ TP_STRUCT__entry(
-+ __field( __u64, address )
-+ __field( __u64, data )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->address = address;
-+ __entry->data = data;
-+ ),
-+
-+ TP_printk("dst %u vec %x (%s|%s|%s%s)",
-+ (u8)(__entry->address >> 12), (u8)__entry->data,
-+ __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
-+ (__entry->address & (1<<2)) ? "logical" : "physical",
-+ (__entry->data & (1<<15)) ? "level" : "edge",
-+ (__entry->address & (1<<3)) ? "|rh" : "")
-+);
-+
-+#define kvm_irqchips \
-+ {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
-+ {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
-+ {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
-+
-+TRACE_EVENT(kvm_ack_irq,
-+ TP_PROTO(unsigned int irqchip, unsigned int pin),
-+ TP_ARGS(irqchip, pin),
-+
-+ TP_STRUCT__entry(
-+ __field( unsigned int, irqchip )
-+ __field( unsigned int, pin )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->irqchip = irqchip;
-+ __entry->pin = pin;
-+ ),
-+
-+ TP_printk("irqchip %s pin %u",
-+ __print_symbolic(__entry->irqchip, kvm_irqchips),
-+ __entry->pin)
-+);
-+
-+
-+
-+#endif /* defined(__KVM_HAVE_IOAPIC) */
-+
-+#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
-+#define KVM_TRACE_MMIO_READ 1
-+#define KVM_TRACE_MMIO_WRITE 2
-+
-+#define kvm_trace_symbol_mmio \
-+ { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
-+ { KVM_TRACE_MMIO_READ, "read" }, \
-+ { KVM_TRACE_MMIO_WRITE, "write" }
-+
-+TRACE_EVENT(kvm_mmio,
-+ TP_PROTO(int type, int len, u64 gpa, u64 val),
-+ TP_ARGS(type, len, gpa, val),
-+
-+ TP_STRUCT__entry(
-+ __field( u32, type )
-+ __field( u32, len )
-+ __field( u64, gpa )
-+ __field( u64, val )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->type = type;
-+ __entry->len = len;
-+ __entry->gpa = gpa;
-+ __entry->val = val;
-+ ),
-+
-+ TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
-+ __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
-+ __entry->len, __entry->gpa, __entry->val)
-+);
-+
-+#define kvm_fpu_load_symbol \
-+ {0, "unload"}, \
-+ {1, "load"}
-+
-+TRACE_EVENT(kvm_fpu,
-+ TP_PROTO(int load),
-+ TP_ARGS(load),
-+
-+ TP_STRUCT__entry(
-+ __field( u32, load )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->load = load;
-+ ),
-+
-+ TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
-+);
-+
-+TRACE_EVENT(kvm_age_page,
-+ TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
-+ TP_ARGS(hva, slot, ref),
-+
-+ TP_STRUCT__entry(
-+ __field( u64, hva )
-+ __field( u64, gfn )
-+ __field( u8, referenced )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->hva = hva;
-+ __entry->gfn =
-+ slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
-+ __entry->referenced = ref;
-+ ),
-+
-+ TP_printk("hva %llx gfn %llx %s",
-+ __entry->hva, __entry->gfn,
-+ __entry->referenced ? "YOUNG" : "OLD")
-+);
-+
-+#ifdef CONFIG_KVM_ASYNC_PF
-+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
-+
-+ TP_PROTO(u64 gva, u64 gfn),
-+
-+ TP_ARGS(gva, gfn),
-+
-+ TP_STRUCT__entry(
-+ __field(__u64, gva)
-+ __field(u64, gfn)
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->gva = gva;
-+ __entry->gfn = gfn;
-+ ),
-+
-+ TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
-+);
-+
-+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
-+
-+ TP_PROTO(u64 gva, u64 gfn),
-+
-+ TP_ARGS(gva, gfn)
-+);
-+
-+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
-+
-+ TP_PROTO(u64 gva, u64 gfn),
-+
-+ TP_ARGS(gva, gfn)
-+);
-+
-+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
-+
-+ TP_PROTO(u64 token, u64 gva),
-+
-+ TP_ARGS(token, gva),
-+
-+ TP_STRUCT__entry(
-+ __field(__u64, token)
-+ __field(__u64, gva)
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->token = token;
-+ __entry->gva = gva;
-+ ),
-+
-+ TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
-+
-+);
-+
-+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
-+
-+ TP_PROTO(u64 token, u64 gva),
-+
-+ TP_ARGS(token, gva)
-+);
-+
-+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
-+
-+ TP_PROTO(u64 token, u64 gva),
-+
-+ TP_ARGS(token, gva)
-+);
-+
-+TRACE_EVENT(
-+ kvm_async_pf_completed,
-+ TP_PROTO(unsigned long address, struct page *page, u64 gva),
-+ TP_ARGS(address, page, gva),
-+
-+ TP_STRUCT__entry(
-+ __field(unsigned long, address)
-+ __field(pfn_t, pfn)
-+ __field(u64, gva)
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->address = address;
-+ __entry->pfn = page ? page_to_pfn(page) : 0;
-+ __entry->gva = gva;
-+ ),
-+
-+ TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
-+ __entry->address, __entry->pfn)
-+);
-+
-+#endif
-+
-+#endif /* _TRACE_KVM_MAIN_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
-diff --git a/drivers/staging/lttng/instrumentation/events/mainline/sched.h b/drivers/staging/lttng/instrumentation/events/mainline/sched.h
-new file mode 100644
-index 0000000..f633478
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/mainline/sched.h
-@@ -0,0 +1,397 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM sched
-+
-+#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_SCHED_H
-+
-+#include <linux/sched.h>
-+#include <linux/tracepoint.h>
-+
-+/*
-+ * Tracepoint for calling kthread_stop, performed to end a kthread:
-+ */
-+TRACE_EVENT(sched_kthread_stop,
-+
-+ TP_PROTO(struct task_struct *t),
-+
-+ TP_ARGS(t),
-+
-+ TP_STRUCT__entry(
-+ __array( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
-+ __entry->pid = t->pid;
-+ ),
-+
-+ TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
-+);
-+
-+/*
-+ * Tracepoint for the return value of the kthread stopping:
-+ */
-+TRACE_EVENT(sched_kthread_stop_ret,
-+
-+ TP_PROTO(int ret),
-+
-+ TP_ARGS(ret),
-+
-+ TP_STRUCT__entry(
-+ __field( int, ret )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->ret = ret;
-+ ),
-+
-+ TP_printk("ret=%d", __entry->ret)
-+);
-+
-+/*
-+ * Tracepoint for waking up a task:
-+ */
-+DECLARE_EVENT_CLASS(sched_wakeup_template,
-+
-+ TP_PROTO(struct task_struct *p, int success),
-+
-+ TP_ARGS(p, success),
-+
-+ TP_STRUCT__entry(
-+ __array( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ __field( int, prio )
-+ __field( int, success )
-+ __field( int, target_cpu )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-+ __entry->pid = p->pid;
-+ __entry->prio = p->prio;
-+ __entry->success = success;
-+ __entry->target_cpu = task_cpu(p);
-+ ),
-+
-+ TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
-+ __entry->comm, __entry->pid, __entry->prio,
-+ __entry->success, __entry->target_cpu)
-+);
-+
-+DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
-+ TP_PROTO(struct task_struct *p, int success),
-+ TP_ARGS(p, success));
-+
-+/*
-+ * Tracepoint for waking up a new task:
-+ */
-+DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
-+ TP_PROTO(struct task_struct *p, int success),
-+ TP_ARGS(p, success));
-+
-+#ifdef CREATE_TRACE_POINTS
-+static inline long __trace_sched_switch_state(struct task_struct *p)
-+{
-+ long state = p->state;
-+
-+#ifdef CONFIG_PREEMPT
-+ /*
-+ * For all intents and purposes a preempted task is a running task.
-+ */
-+ if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
-+ state = TASK_RUNNING;
-+#endif
-+
-+ return state;
-+}
-+#endif
-+
-+/*
-+ * Tracepoint for task switches, performed by the scheduler:
-+ */
-+TRACE_EVENT(sched_switch,
-+
-+ TP_PROTO(struct task_struct *prev,
-+ struct task_struct *next),
-+
-+ TP_ARGS(prev, next),
-+
-+ TP_STRUCT__entry(
-+ __array( char, prev_comm, TASK_COMM_LEN )
-+ __field( pid_t, prev_pid )
-+ __field( int, prev_prio )
-+ __field( long, prev_state )
-+ __array( char, next_comm, TASK_COMM_LEN )
-+ __field( pid_t, next_pid )
-+ __field( int, next_prio )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
-+ __entry->prev_pid = prev->pid;
-+ __entry->prev_prio = prev->prio;
-+ __entry->prev_state = __trace_sched_switch_state(prev);
-+ memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
-+ __entry->next_pid = next->pid;
-+ __entry->next_prio = next->prio;
-+ ),
-+
-+ TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
-+ __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
-+ __entry->prev_state ?
-+ __print_flags(__entry->prev_state, "|",
-+ { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
-+ { 16, "Z" }, { 32, "X" }, { 64, "x" },
-+ { 128, "W" }) : "R",
-+ __entry->next_comm, __entry->next_pid, __entry->next_prio)
-+);
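[ Editorial aside, not part of the removed patch: a TRACE_EVENT(sched_switch, ...) definition such as the one above makes the tracepoint infrastructure generate a trace_sched_switch() static inline, which scheduler code invokes at the context-switch point. The fragment below is only a sketch of such a call site; the fire_sched_switch() wrapper name is invented for illustration and assumes a kernel build where the mainline <trace/events/sched.h> header is in use. ]

    /* Sketch of a caller; effectively a no-op unless the event is enabled. */
    #include <linux/sched.h>
    #include <trace/events/sched.h>

    static inline void fire_sched_switch(struct task_struct *prev,
                                         struct task_struct *next)
    {
            trace_sched_switch(prev, next);
    }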
-+
-+/*
-+ * Tracepoint for a task being migrated:
-+ */
-+TRACE_EVENT(sched_migrate_task,
-+
-+ TP_PROTO(struct task_struct *p, int dest_cpu),
-+
-+ TP_ARGS(p, dest_cpu),
-+
-+ TP_STRUCT__entry(
-+ __array( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ __field( int, prio )
-+ __field( int, orig_cpu )
-+ __field( int, dest_cpu )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-+ __entry->pid = p->pid;
-+ __entry->prio = p->prio;
-+ __entry->orig_cpu = task_cpu(p);
-+ __entry->dest_cpu = dest_cpu;
-+ ),
-+
-+ TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
-+ __entry->comm, __entry->pid, __entry->prio,
-+ __entry->orig_cpu, __entry->dest_cpu)
-+);
-+
-+DECLARE_EVENT_CLASS(sched_process_template,
-+
-+ TP_PROTO(struct task_struct *p),
-+
-+ TP_ARGS(p),
-+
-+ TP_STRUCT__entry(
-+ __array( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ __field( int, prio )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-+ __entry->pid = p->pid;
-+ __entry->prio = p->prio;
-+ ),
-+
-+ TP_printk("comm=%s pid=%d prio=%d",
-+ __entry->comm, __entry->pid, __entry->prio)
-+);
-+
-+/*
-+ * Tracepoint for freeing a task:
-+ */
-+DEFINE_EVENT(sched_process_template, sched_process_free,
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p));
-+
-+
-+/*
-+ * Tracepoint for a task exiting:
-+ */
-+DEFINE_EVENT(sched_process_template, sched_process_exit,
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p));
-+
-+/*
-+ * Tracepoint for waiting on task to unschedule:
-+ */
-+DEFINE_EVENT(sched_process_template, sched_wait_task,
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p));
-+
-+/*
-+ * Tracepoint for a waiting task:
-+ */
-+TRACE_EVENT(sched_process_wait,
-+
-+ TP_PROTO(struct pid *pid),
-+
-+ TP_ARGS(pid),
-+
-+ TP_STRUCT__entry(
-+ __array( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ __field( int, prio )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
-+ __entry->pid = pid_nr(pid);
-+ __entry->prio = current->prio;
-+ ),
-+
-+ TP_printk("comm=%s pid=%d prio=%d",
-+ __entry->comm, __entry->pid, __entry->prio)
-+);
-+
-+/*
-+ * Tracepoint for do_fork:
-+ */
-+TRACE_EVENT(sched_process_fork,
-+
-+ TP_PROTO(struct task_struct *parent, struct task_struct *child),
-+
-+ TP_ARGS(parent, child),
-+
-+ TP_STRUCT__entry(
-+ __array( char, parent_comm, TASK_COMM_LEN )
-+ __field( pid_t, parent_pid )
-+ __array( char, child_comm, TASK_COMM_LEN )
-+ __field( pid_t, child_pid )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
-+ __entry->parent_pid = parent->pid;
-+ memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
-+ __entry->child_pid = child->pid;
-+ ),
-+
-+ TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
-+ __entry->parent_comm, __entry->parent_pid,
-+ __entry->child_comm, __entry->child_pid)
-+);
-+
-+/*
-+ * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
-+ * adding sched_stat support to SCHED_FIFO/RR would be welcome.
-+ */
-+DECLARE_EVENT_CLASS(sched_stat_template,
-+
-+ TP_PROTO(struct task_struct *tsk, u64 delay),
-+
-+ TP_ARGS(tsk, delay),
-+
-+ TP_STRUCT__entry(
-+ __array( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ __field( u64, delay )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-+ __entry->pid = tsk->pid;
-+ __entry->delay = delay;
-+ )
-+ TP_perf_assign(
-+ __perf_count(delay);
-+ ),
-+
-+ TP_printk("comm=%s pid=%d delay=%Lu [ns]",
-+ __entry->comm, __entry->pid,
-+ (unsigned long long)__entry->delay)
-+);
-+
-+
-+/*
-+ * Tracepoint for accounting wait time (time the task is runnable
-+ * but not actually running due to scheduler contention).
-+ */
-+DEFINE_EVENT(sched_stat_template, sched_stat_wait,
-+ TP_PROTO(struct task_struct *tsk, u64 delay),
-+ TP_ARGS(tsk, delay));
-+
-+/*
-+ * Tracepoint for accounting sleep time (time the task is not runnable,
-+ * including iowait, see below).
-+ */
-+DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
-+ TP_PROTO(struct task_struct *tsk, u64 delay),
-+ TP_ARGS(tsk, delay));
-+
-+/*
-+ * Tracepoint for accounting iowait time (time the task is not runnable
-+ * due to waiting on IO to complete).
-+ */
-+DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
-+ TP_PROTO(struct task_struct *tsk, u64 delay),
-+ TP_ARGS(tsk, delay));
-+
-+/*
-+ * Tracepoint for accounting runtime (time the task is executing
-+ * on a CPU).
-+ */
-+TRACE_EVENT(sched_stat_runtime,
-+
-+ TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
-+
-+ TP_ARGS(tsk, runtime, vruntime),
-+
-+ TP_STRUCT__entry(
-+ __array( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ __field( u64, runtime )
-+ __field( u64, vruntime )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-+ __entry->pid = tsk->pid;
-+ __entry->runtime = runtime;
-+ __entry->vruntime = vruntime;
-+ )
-+ TP_perf_assign(
-+ __perf_count(runtime);
-+ ),
-+
-+ TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
-+ __entry->comm, __entry->pid,
-+ (unsigned long long)__entry->runtime,
-+ (unsigned long long)__entry->vruntime)
-+);
-+
-+/*
-+ * Tracepoint for showing priority inheritance modifying a tasks
-+ * priority.
-+ */
-+TRACE_EVENT(sched_pi_setprio,
-+
-+ TP_PROTO(struct task_struct *tsk, int newprio),
-+
-+ TP_ARGS(tsk, newprio),
-+
-+ TP_STRUCT__entry(
-+ __array( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ __field( int, oldprio )
-+ __field( int, newprio )
-+ ),
-+
-+ TP_fast_assign(
-+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
-+ __entry->pid = tsk->pid;
-+ __entry->oldprio = tsk->prio;
-+ __entry->newprio = newprio;
-+ ),
-+
-+ TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
-+ __entry->comm, __entry->pid,
-+ __entry->oldprio, __entry->newprio)
-+);
-+
-+#endif /* _TRACE_SCHED_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
-diff --git a/drivers/staging/lttng/instrumentation/events/mainline/syscalls.h b/drivers/staging/lttng/instrumentation/events/mainline/syscalls.h
-new file mode 100644
-index 0000000..5a4c04a
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/mainline/syscalls.h
-@@ -0,0 +1,75 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM raw_syscalls
-+#define TRACE_INCLUDE_FILE syscalls
-+
-+#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_EVENTS_SYSCALLS_H
-+
-+#include <linux/tracepoint.h>
-+
-+#include <asm/ptrace.h>
-+#include <asm/syscall.h>
-+
-+
-+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
-+
-+extern void syscall_regfunc(void);
-+extern void syscall_unregfunc(void);
-+
-+TRACE_EVENT_FN(sys_enter,
-+
-+ TP_PROTO(struct pt_regs *regs, long id),
-+
-+ TP_ARGS(regs, id),
-+
-+ TP_STRUCT__entry(
-+ __field( long, id )
-+ __array( unsigned long, args, 6 )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->id = id;
-+ syscall_get_arguments(current, regs, 0, 6, __entry->args);
-+ ),
-+
-+ TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
-+ __entry->id,
-+ __entry->args[0], __entry->args[1], __entry->args[2],
-+ __entry->args[3], __entry->args[4], __entry->args[5]),
-+
-+ syscall_regfunc, syscall_unregfunc
-+);
-+
-+TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
-+
-+TRACE_EVENT_FN(sys_exit,
-+
-+ TP_PROTO(struct pt_regs *regs, long ret),
-+
-+ TP_ARGS(regs, ret),
-+
-+ TP_STRUCT__entry(
-+ __field( long, id )
-+ __field( long, ret )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->id = syscall_get_nr(current, regs);
-+ __entry->ret = ret;
-+ ),
-+
-+ TP_printk("NR %ld = %ld",
-+ __entry->id, __entry->ret),
-+
-+ syscall_regfunc, syscall_unregfunc
-+);
-+
-+TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY)
-+
-+#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
-+
-+#endif /* _TRACE_EVENTS_SYSCALLS_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
-+
---
-1.7.9
-
diff --git a/patches.lttng/0008-lttng-syscall-instrumentation.patch b/patches.lttng/0008-lttng-syscall-instrumentation.patch
deleted file mode 100644
index 1ddac7a9807..00000000000
--- a/patches.lttng/0008-lttng-syscall-instrumentation.patch
+++ /dev/null
@@ -1,7758 +0,0 @@
-From 54a69e5511f5031d3d14e1418ef7ea2456a73684 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:16 -0500
-Subject: lttng: syscall instrumentation
-
-x86-32 and x86-64 system call instrumentation, along with the
-lttng-syscalls-generate-headers.sh script that generates the headers
-from the system call list. See README for details.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- .../syscalls/3.0.4/x86-64-syscalls-3.0.4 | 263 +++
- .../syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6 | 291 +++
- .../staging/lttng/instrumentation/syscalls/README | 18 +
- .../syscalls/headers/compat_syscalls_integers.h | 3 +
- .../syscalls/headers/compat_syscalls_pointers.h | 3 +
- .../syscalls/headers/syscalls_integers.h | 7 +
- .../syscalls/headers/syscalls_integers_override.h | 14 +
- .../syscalls/headers/syscalls_pointers.h | 7 +
- .../syscalls/headers/syscalls_pointers_override.h | 4 +
- .../syscalls/headers/syscalls_unknown.h | 55 +
- .../headers/x86-32-syscalls-3.1.0-rc6_integers.h | 1163 ++++++++++
- .../x86-32-syscalls-3.1.0-rc6_integers_override.h | 38 +
- .../headers/x86-32-syscalls-3.1.0-rc6_pointers.h | 2232 ++++++++++++++++++++
- .../x86-32-syscalls-3.1.0-rc6_pointers_override.h | 17 +
- .../headers/x86-64-syscalls-3.0.4_integers.h | 1013 +++++++++
- .../x86-64-syscalls-3.0.4_integers_override.h | 3 +
- .../headers/x86-64-syscalls-3.0.4_pointers.h | 2076 ++++++++++++++++++
- .../x86-64-syscalls-3.0.4_pointers_override.h | 5 +
- .../syscalls/lttng-syscalls-extractor/Makefile | 1 +
- .../lttng-syscalls-extractor.c | 85 +
- .../syscalls/lttng-syscalls-generate-headers.sh | 275 +++
- 21 files changed, 7573 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/3.0.4/x86-64-syscalls-3.0.4
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/README
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_integers.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_pointers.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers_override.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_unknown.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers_override.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/Makefile
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c
- create mode 100644 drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh
-
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/3.0.4/x86-64-syscalls-3.0.4 b/drivers/staging/lttng/instrumentation/syscalls/3.0.4/x86-64-syscalls-3.0.4
-new file mode 100644
-index 0000000..b229472
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/3.0.4/x86-64-syscalls-3.0.4
-@@ -0,0 +1,263 @@
-+syscall sys_read nr 0 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
-+syscall sys_write nr 1 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
-+syscall sys_open nr 2 nbargs 3 types: (const char *, int, int) args: (filename, flags, mode)
-+syscall sys_close nr 3 nbargs 1 types: (unsigned int) args: (fd)
-+syscall sys_newstat nr 4 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
-+syscall sys_newfstat nr 5 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
-+syscall sys_newlstat nr 6 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
-+syscall sys_poll nr 7 nbargs 3 types: (struct pollfd *, unsigned int, long) args: (ufds, nfds, timeout_msecs)
-+syscall sys_lseek nr 8 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
-+syscall sys_mmap nr 9 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, len, prot, flags, fd, off)
-+syscall sys_mprotect nr 10 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
-+syscall sys_munmap nr 11 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
-+syscall sys_brk nr 12 nbargs 1 types: (unsigned long) args: (brk)
-+syscall sys_rt_sigaction nr 13 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
-+syscall sys_rt_sigprocmask nr 14 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
-+syscall sys_ioctl nr 16 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
-+syscall sys_readv nr 19 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
-+syscall sys_writev nr 20 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
-+syscall sys_access nr 21 nbargs 2 types: (const char *, int) args: (filename, mode)
-+syscall sys_pipe nr 22 nbargs 1 types: (int *) args: (fildes)
-+syscall sys_select nr 23 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
-+syscall sys_sched_yield nr 24 nbargs 0 types: () args: ()
-+syscall sys_mremap nr 25 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
-+syscall sys_msync nr 26 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
-+syscall sys_mincore nr 27 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
-+syscall sys_madvise nr 28 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
-+syscall sys_shmget nr 29 nbargs 3 types: (key_t, size_t, int) args: (key, size, shmflg)
-+syscall sys_shmat nr 30 nbargs 3 types: (int, char *, int) args: (shmid, shmaddr, shmflg)
-+syscall sys_shmctl nr 31 nbargs 3 types: (int, int, struct shmid_ds *) args: (shmid, cmd, buf)
-+syscall sys_dup nr 32 nbargs 1 types: (unsigned int) args: (fildes)
-+syscall sys_dup2 nr 33 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
-+syscall sys_pause nr 34 nbargs 0 types: () args: ()
-+syscall sys_nanosleep nr 35 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
-+syscall sys_getitimer nr 36 nbargs 2 types: (int, struct itimerval *) args: (which, value)
-+syscall sys_alarm nr 37 nbargs 1 types: (unsigned int) args: (seconds)
-+syscall sys_setitimer nr 38 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
-+syscall sys_getpid nr 39 nbargs 0 types: () args: ()
-+syscall sys_sendfile64 nr 40 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
-+syscall sys_socket nr 41 nbargs 3 types: (int, int, int) args: (family, type, protocol)
-+syscall sys_connect nr 42 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, uservaddr, addrlen)
-+syscall sys_accept nr 43 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, upeer_sockaddr, upeer_addrlen)
-+syscall sys_sendto nr 44 nbargs 6 types: (int, void *, size_t, unsigned, struct sockaddr *, int) args: (fd, buff, len, flags, addr, addr_len)
-+syscall sys_recvfrom nr 45 nbargs 6 types: (int, void *, size_t, unsigned, struct sockaddr *, int *) args: (fd, ubuf, size, flags, addr, addr_len)
-+syscall sys_sendmsg nr 46 nbargs 3 types: (int, struct msghdr *, unsigned) args: (fd, msg, flags)
-+syscall sys_recvmsg nr 47 nbargs 3 types: (int, struct msghdr *, unsigned int) args: (fd, msg, flags)
-+syscall sys_shutdown nr 48 nbargs 2 types: (int, int) args: (fd, how)
-+syscall sys_bind nr 49 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, umyaddr, addrlen)
-+syscall sys_listen nr 50 nbargs 2 types: (int, int) args: (fd, backlog)
-+syscall sys_getsockname nr 51 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
-+syscall sys_getpeername nr 52 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
-+syscall sys_socketpair nr 53 nbargs 4 types: (int, int, int, int *) args: (family, type, protocol, usockvec)
-+syscall sys_setsockopt nr 54 nbargs 5 types: (int, int, int, char *, int) args: (fd, level, optname, optval, optlen)
-+syscall sys_getsockopt nr 55 nbargs 5 types: (int, int, int, char *, int *) args: (fd, level, optname, optval, optlen)
-+syscall sys_exit nr 60 nbargs 1 types: (int) args: (error_code)
-+syscall sys_wait4 nr 61 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
-+syscall sys_kill nr 62 nbargs 2 types: (pid_t, int) args: (pid, sig)
-+syscall sys_newuname nr 63 nbargs 1 types: (struct new_utsname *) args: (name)
-+syscall sys_semget nr 64 nbargs 3 types: (key_t, int, int) args: (key, nsems, semflg)
-+syscall sys_semop nr 65 nbargs 3 types: (int, struct sembuf *, unsigned) args: (semid, tsops, nsops)
-+syscall sys_shmdt nr 67 nbargs 1 types: (char *) args: (shmaddr)
-+syscall sys_msgget nr 68 nbargs 2 types: (key_t, int) args: (key, msgflg)
-+syscall sys_msgsnd nr 69 nbargs 4 types: (int, struct msgbuf *, size_t, int) args: (msqid, msgp, msgsz, msgflg)
-+syscall sys_msgrcv nr 70 nbargs 5 types: (int, struct msgbuf *, size_t, long, int) args: (msqid, msgp, msgsz, msgtyp, msgflg)
-+syscall sys_msgctl nr 71 nbargs 3 types: (int, int, struct msqid_ds *) args: (msqid, cmd, buf)
-+syscall sys_fcntl nr 72 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
-+syscall sys_flock nr 73 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
-+syscall sys_fsync nr 74 nbargs 1 types: (unsigned int) args: (fd)
-+syscall sys_fdatasync nr 75 nbargs 1 types: (unsigned int) args: (fd)
-+syscall sys_truncate nr 76 nbargs 2 types: (const char *, long) args: (path, length)
-+syscall sys_ftruncate nr 77 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
-+syscall sys_getdents nr 78 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
-+syscall sys_getcwd nr 79 nbargs 2 types: (char *, unsigned long) args: (buf, size)
-+syscall sys_chdir nr 80 nbargs 1 types: (const char *) args: (filename)
-+syscall sys_fchdir nr 81 nbargs 1 types: (unsigned int) args: (fd)
-+syscall sys_rename nr 82 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
-+syscall sys_mkdir nr 83 nbargs 2 types: (const char *, int) args: (pathname, mode)
-+syscall sys_rmdir nr 84 nbargs 1 types: (const char *) args: (pathname)
-+syscall sys_creat nr 85 nbargs 2 types: (const char *, int) args: (pathname, mode)
-+syscall sys_link nr 86 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
-+syscall sys_unlink nr 87 nbargs 1 types: (const char *) args: (pathname)
-+syscall sys_symlink nr 88 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
-+syscall sys_readlink nr 89 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
-+syscall sys_chmod nr 90 nbargs 2 types: (const char *, mode_t) args: (filename, mode)
-+syscall sys_fchmod nr 91 nbargs 2 types: (unsigned int, mode_t) args: (fd, mode)
-+syscall sys_chown nr 92 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
-+syscall sys_fchown nr 93 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
-+syscall sys_lchown nr 94 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
-+syscall sys_umask nr 95 nbargs 1 types: (int) args: (mask)
-+syscall sys_gettimeofday nr 96 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
-+syscall sys_getrlimit nr 97 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
-+syscall sys_getrusage nr 98 nbargs 2 types: (int, struct rusage *) args: (who, ru)
-+syscall sys_sysinfo nr 99 nbargs 1 types: (struct sysinfo *) args: (info)
-+syscall sys_times nr 100 nbargs 1 types: (struct tms *) args: (tbuf)
-+syscall sys_ptrace nr 101 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
-+syscall sys_getuid nr 102 nbargs 0 types: () args: ()
-+syscall sys_syslog nr 103 nbargs 3 types: (int, char *, int) args: (type, buf, len)
-+syscall sys_getgid nr 104 nbargs 0 types: () args: ()
-+syscall sys_setuid nr 105 nbargs 1 types: (uid_t) args: (uid)
-+syscall sys_setgid nr 106 nbargs 1 types: (gid_t) args: (gid)
-+syscall sys_geteuid nr 107 nbargs 0 types: () args: ()
-+syscall sys_getegid nr 108 nbargs 0 types: () args: ()
-+syscall sys_setpgid nr 109 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
-+syscall sys_getppid nr 110 nbargs 0 types: () args: ()
-+syscall sys_getpgrp nr 111 nbargs 0 types: () args: ()
-+syscall sys_setsid nr 112 nbargs 0 types: () args: ()
-+syscall sys_setreuid nr 113 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
-+syscall sys_setregid nr 114 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
-+syscall sys_getgroups nr 115 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
-+syscall sys_setgroups nr 116 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
-+syscall sys_setresuid nr 117 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
-+syscall sys_getresuid nr 118 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruid, euid, suid)
-+syscall sys_setresgid nr 119 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
-+syscall sys_getresgid nr 120 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgid, egid, sgid)
-+syscall sys_getpgid nr 121 nbargs 1 types: (pid_t) args: (pid)
-+syscall sys_setfsuid nr 122 nbargs 1 types: (uid_t) args: (uid)
-+syscall sys_setfsgid nr 123 nbargs 1 types: (gid_t) args: (gid)
-+syscall sys_getsid nr 124 nbargs 1 types: (pid_t) args: (pid)
-+syscall sys_capget nr 125 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
-+syscall sys_capset nr 126 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
-+syscall sys_rt_sigpending nr 127 nbargs 2 types: (sigset_t *, size_t) args: (set, sigsetsize)
-+syscall sys_rt_sigtimedwait nr 128 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
-+syscall sys_rt_sigqueueinfo nr 129 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
-+syscall sys_rt_sigsuspend nr 130 nbargs 2 types: (sigset_t *, size_t) args: (unewset, sigsetsize)
-+syscall sys_utime nr 132 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
-+syscall sys_mknod nr 133 nbargs 3 types: (const char *, int, unsigned) args: (filename, mode, dev)
-+syscall sys_personality nr 135 nbargs 1 types: (unsigned int) args: (personality)
-+syscall sys_ustat nr 136 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
-+syscall sys_statfs nr 137 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
-+syscall sys_fstatfs nr 138 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
-+syscall sys_sysfs nr 139 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
-+syscall sys_getpriority nr 140 nbargs 2 types: (int, int) args: (which, who)
-+syscall sys_setpriority nr 141 nbargs 3 types: (int, int, int) args: (which, who, niceval)
-+syscall sys_sched_setparam nr 142 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
-+syscall sys_sched_getparam nr 143 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
-+syscall sys_sched_setscheduler nr 144 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
-+syscall sys_sched_getscheduler nr 145 nbargs 1 types: (pid_t) args: (pid)
-+syscall sys_sched_get_priority_max nr 146 nbargs 1 types: (int) args: (policy)
-+syscall sys_sched_get_priority_min nr 147 nbargs 1 types: (int) args: (policy)
-+syscall sys_sched_rr_get_interval nr 148 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
-+syscall sys_mlock nr 149 nbargs 2 types: (unsigned long, size_t) args: (start, len)
-+syscall sys_munlock nr 150 nbargs 2 types: (unsigned long, size_t) args: (start, len)
-+syscall sys_mlockall nr 151 nbargs 1 types: (int) args: (flags)
-+syscall sys_munlockall nr 152 nbargs 0 types: () args: ()
-+syscall sys_vhangup nr 153 nbargs 0 types: () args: ()
-+syscall sys_pivot_root nr 155 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
-+syscall sys_sysctl nr 156 nbargs 1 types: (struct __sysctl_args *) args: (args)
-+syscall sys_prctl nr 157 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
-+syscall sys_adjtimex nr 159 nbargs 1 types: (struct timex *) args: (txc_p)
-+syscall sys_setrlimit nr 160 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
-+syscall sys_chroot nr 161 nbargs 1 types: (const char *) args: (filename)
-+syscall sys_sync nr 162 nbargs 0 types: () args: ()
-+syscall sys_settimeofday nr 164 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
-+syscall sys_mount nr 165 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
-+syscall sys_umount nr 166 nbargs 2 types: (char *, int) args: (name, flags)
-+syscall sys_swapon nr 167 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
-+syscall sys_swapoff nr 168 nbargs 1 types: (const char *) args: (specialfile)
-+syscall sys_reboot nr 169 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
-+syscall sys_sethostname nr 170 nbargs 2 types: (char *, int) args: (name, len)
-+syscall sys_setdomainname nr 171 nbargs 2 types: (char *, int) args: (name, len)
-+syscall sys_init_module nr 175 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
-+syscall sys_delete_module nr 176 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
-+syscall sys_nfsservctl nr 180 nbargs 3 types: (int, struct nfsctl_arg *, void *) args: (cmd, arg, res)
-+syscall sys_gettid nr 186 nbargs 0 types: () args: ()
-+syscall sys_setxattr nr 188 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
-+syscall sys_lsetxattr nr 189 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
-+syscall sys_fsetxattr nr 190 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
-+syscall sys_getxattr nr 191 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
-+syscall sys_lgetxattr nr 192 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
-+syscall sys_fgetxattr nr 193 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
-+syscall sys_listxattr nr 194 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
-+syscall sys_llistxattr nr 195 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
-+syscall sys_flistxattr nr 196 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
-+syscall sys_removexattr nr 197 nbargs 2 types: (const char *, const char *) args: (pathname, name)
-+syscall sys_lremovexattr nr 198 nbargs 2 types: (const char *, const char *) args: (pathname, name)
-+syscall sys_fremovexattr nr 199 nbargs 2 types: (int, const char *) args: (fd, name)
-+syscall sys_tkill nr 200 nbargs 2 types: (pid_t, int) args: (pid, sig)
-+syscall sys_time nr 201 nbargs 1 types: (time_t *) args: (tloc)
-+syscall sys_futex nr 202 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
-+syscall sys_sched_setaffinity nr 203 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
-+syscall sys_sched_getaffinity nr 204 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
-+syscall sys_io_setup nr 206 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
-+syscall sys_io_destroy nr 207 nbargs 1 types: (aio_context_t) args: (ctx)
-+syscall sys_io_getevents nr 208 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
-+syscall sys_io_submit nr 209 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
-+syscall sys_io_cancel nr 210 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
-+syscall sys_epoll_create nr 213 nbargs 1 types: (int) args: (size)
-+syscall sys_remap_file_pages nr 216 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
-+syscall sys_getdents64 nr 217 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
-+syscall sys_set_tid_address nr 218 nbargs 1 types: (int *) args: (tidptr)
-+syscall sys_restart_syscall nr 219 nbargs 0 types: () args: ()
-+syscall sys_semtimedop nr 220 nbargs 4 types: (int, struct sembuf *, unsigned, const struct timespec *) args: (semid, tsops, nsops, timeout)
-+syscall sys_timer_create nr 222 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
-+syscall sys_timer_settime nr 223 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
-+syscall sys_timer_gettime nr 224 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
-+syscall sys_timer_getoverrun nr 225 nbargs 1 types: (timer_t) args: (timer_id)
-+syscall sys_timer_delete nr 226 nbargs 1 types: (timer_t) args: (timer_id)
-+syscall sys_clock_settime nr 227 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
-+syscall sys_clock_gettime nr 228 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
-+syscall sys_clock_getres nr 229 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
-+syscall sys_clock_nanosleep nr 230 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
-+syscall sys_exit_group nr 231 nbargs 1 types: (int) args: (error_code)
-+syscall sys_epoll_wait nr 232 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
-+syscall sys_epoll_ctl nr 233 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
-+syscall sys_tgkill nr 234 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
-+syscall sys_utimes nr 235 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
-+syscall sys_mq_open nr 240 nbargs 4 types: (const char *, int, mode_t, struct mq_attr *) args: (u_name, oflag, mode, u_attr)
-+syscall sys_mq_unlink nr 241 nbargs 1 types: (const char *) args: (u_name)
-+syscall sys_mq_timedsend nr 242 nbargs 5 types: (mqd_t, const char *, size_t, unsigned int, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout)
-+syscall sys_mq_timedreceive nr 243 nbargs 5 types: (mqd_t, char *, size_t, unsigned int *, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout)
-+syscall sys_mq_notify nr 244 nbargs 2 types: (mqd_t, const struct sigevent *) args: (mqdes, u_notification)
-+syscall sys_mq_getsetattr nr 245 nbargs 3 types: (mqd_t, const struct mq_attr *, struct mq_attr *) args: (mqdes, u_mqstat, u_omqstat)
-+syscall sys_kexec_load nr 246 nbargs 4 types: (unsigned long, unsigned long, struct kexec_segment *, unsigned long) args: (entry, nr_segments, segments, flags)
-+syscall sys_waitid nr 247 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
-+syscall sys_ioprio_set nr 251 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
-+syscall sys_ioprio_get nr 252 nbargs 2 types: (int, int) args: (which, who)
-+syscall sys_inotify_init nr 253 nbargs 0 types: () args: ()
-+syscall sys_inotify_add_watch nr 254 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
-+syscall sys_inotify_rm_watch nr 255 nbargs 2 types: (int, __s32) args: (fd, wd)
-+syscall sys_openat nr 257 nbargs 4 types: (int, const char *, int, int) args: (dfd, filename, flags, mode)
-+syscall sys_mkdirat nr 258 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, mode)
-+syscall sys_mknodat nr 259 nbargs 4 types: (int, const char *, int, unsigned) args: (dfd, filename, mode, dev)
-+syscall sys_fchownat nr 260 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
-+syscall sys_futimesat nr 261 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
-+syscall sys_newfstatat nr 262 nbargs 4 types: (int, const char *, struct stat *, int) args: (dfd, filename, statbuf, flag)
-+syscall sys_unlinkat nr 263 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
-+syscall sys_renameat nr 264 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
-+syscall sys_linkat nr 265 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
-+syscall sys_symlinkat nr 266 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
-+syscall sys_readlinkat nr 267 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
-+syscall sys_fchmodat nr 268 nbargs 3 types: (int, const char *, mode_t) args: (dfd, filename, mode)
-+syscall sys_faccessat nr 269 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
-+syscall sys_pselect6 nr 270 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
-+syscall sys_ppoll nr 271 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
-+syscall sys_unshare nr 272 nbargs 1 types: (unsigned long) args: (unshare_flags)
-+syscall sys_set_robust_list nr 273 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
-+syscall sys_get_robust_list nr 274 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
-+syscall sys_splice nr 275 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
-+syscall sys_tee nr 276 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
-+syscall sys_vmsplice nr 278 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
-+syscall sys_utimensat nr 280 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
-+syscall sys_epoll_pwait nr 281 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
-+syscall sys_signalfd nr 282 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
-+syscall sys_timerfd_create nr 283 nbargs 2 types: (int, int) args: (clockid, flags)
-+syscall sys_eventfd nr 284 nbargs 1 types: (unsigned int) args: (count)
-+syscall sys_timerfd_settime nr 286 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
-+syscall sys_timerfd_gettime nr 287 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
-+syscall sys_accept4 nr 288 nbargs 4 types: (int, struct sockaddr *, int *, int) args: (fd, upeer_sockaddr, upeer_addrlen, flags)
-+syscall sys_signalfd4 nr 289 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
-+syscall sys_eventfd2 nr 290 nbargs 2 types: (unsigned int, int) args: (count, flags)
-+syscall sys_epoll_create1 nr 291 nbargs 1 types: (int) args: (flags)
-+syscall sys_dup3 nr 292 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
-+syscall sys_pipe2 nr 293 nbargs 2 types: (int *, int) args: (fildes, flags)
-+syscall sys_inotify_init1 nr 294 nbargs 1 types: (int) args: (flags)
-+syscall sys_preadv nr 295 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
-+syscall sys_pwritev nr 296 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
-+syscall sys_rt_tgsigqueueinfo nr 297 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
-+syscall sys_perf_event_open nr 298 nbargs 5 types: (struct perf_event_attr *, pid_t, int, int, unsigned long) args: (attr_uptr, pid, cpu, group_fd, flags)
-+syscall sys_recvmmsg nr 299 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
-+syscall sys_prlimit64 nr 302 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
-+syscall sys_clock_adjtime nr 305 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
-+syscall sys_syncfs nr 306 nbargs 1 types: (int) args: (fd)
-+syscall sys_sendmmsg nr 307 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
-+syscall sys_setns nr 308 nbargs 2 types: (int, int) args: (fd, nstype)
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6 b/drivers/staging/lttng/instrumentation/syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6
-new file mode 100644
-index 0000000..130c1e3
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6
-@@ -0,0 +1,291 @@
-+syscall sys_restart_syscall nr 0 nbargs 0 types: () args: ()
-+syscall sys_exit nr 1 nbargs 1 types: (int) args: (error_code)
-+syscall sys_read nr 3 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
-+syscall sys_write nr 4 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
-+syscall sys_open nr 5 nbargs 3 types: (const char *, int, int) args: (filename, flags, mode)
-+syscall sys_close nr 6 nbargs 1 types: (unsigned int) args: (fd)
-+syscall sys_waitpid nr 7 nbargs 3 types: (pid_t, int *, int) args: (pid, stat_addr, options)
-+syscall sys_creat nr 8 nbargs 2 types: (const char *, int) args: (pathname, mode)
-+syscall sys_link nr 9 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
-+syscall sys_unlink nr 10 nbargs 1 types: (const char *) args: (pathname)
-+syscall sys_chdir nr 12 nbargs 1 types: (const char *) args: (filename)
-+syscall sys_time nr 13 nbargs 1 types: (time_t *) args: (tloc)
-+syscall sys_mknod nr 14 nbargs 3 types: (const char *, int, unsigned) args: (filename, mode, dev)
-+syscall sys_chmod nr 15 nbargs 2 types: (const char *, mode_t) args: (filename, mode)
-+syscall sys_lchown16 nr 16 nbargs 3 types: (const char *, old_uid_t, old_gid_t) args: (filename, user, group)
-+syscall sys_stat nr 18 nbargs 2 types: (const char *, struct __old_kernel_stat *) args: (filename, statbuf)
-+syscall sys_lseek nr 19 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
-+syscall sys_getpid nr 20 nbargs 0 types: () args: ()
-+syscall sys_mount nr 21 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
-+syscall sys_oldumount nr 22 nbargs 1 types: (char *) args: (name)
-+syscall sys_setuid16 nr 23 nbargs 1 types: (old_uid_t) args: (uid)
-+syscall sys_getuid16 nr 24 nbargs 0 types: () args: ()
-+syscall sys_stime nr 25 nbargs 1 types: (time_t *) args: (tptr)
-+syscall sys_ptrace nr 26 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
-+syscall sys_alarm nr 27 nbargs 1 types: (unsigned int) args: (seconds)
-+syscall sys_fstat nr 28 nbargs 2 types: (unsigned int, struct __old_kernel_stat *) args: (fd, statbuf)
-+syscall sys_pause nr 29 nbargs 0 types: () args: ()
-+syscall sys_utime nr 30 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
-+syscall sys_access nr 33 nbargs 2 types: (const char *, int) args: (filename, mode)
-+syscall sys_nice nr 34 nbargs 1 types: (int) args: (increment)
-+syscall sys_sync nr 36 nbargs 0 types: () args: ()
-+syscall sys_kill nr 37 nbargs 2 types: (pid_t, int) args: (pid, sig)
-+syscall sys_rename nr 38 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
-+syscall sys_mkdir nr 39 nbargs 2 types: (const char *, int) args: (pathname, mode)
-+syscall sys_rmdir nr 40 nbargs 1 types: (const char *) args: (pathname)
-+syscall sys_dup nr 41 nbargs 1 types: (unsigned int) args: (fildes)
-+syscall sys_pipe nr 42 nbargs 1 types: (int *) args: (fildes)
-+syscall sys_times nr 43 nbargs 1 types: (struct tms *) args: (tbuf)
-+syscall sys_brk nr 45 nbargs 1 types: (unsigned long) args: (brk)
-+syscall sys_setgid16 nr 46 nbargs 1 types: (old_gid_t) args: (gid)
-+syscall sys_getgid16 nr 47 nbargs 0 types: () args: ()
-+syscall sys_signal nr 48 nbargs 2 types: (int, __sighandler_t) args: (sig, handler)
-+syscall sys_geteuid16 nr 49 nbargs 0 types: () args: ()
-+syscall sys_getegid16 nr 50 nbargs 0 types: () args: ()
-+syscall sys_acct nr 51 nbargs 1 types: (const char *) args: (name)
-+syscall sys_umount nr 52 nbargs 2 types: (char *, int) args: (name, flags)
-+syscall sys_ioctl nr 54 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
-+syscall sys_fcntl nr 55 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
-+syscall sys_setpgid nr 57 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
-+syscall sys_olduname nr 59 nbargs 1 types: (struct oldold_utsname *) args: (name)
-+syscall sys_umask nr 60 nbargs 1 types: (int) args: (mask)
-+syscall sys_chroot nr 61 nbargs 1 types: (const char *) args: (filename)
-+syscall sys_ustat nr 62 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
-+syscall sys_dup2 nr 63 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
-+syscall sys_getppid nr 64 nbargs 0 types: () args: ()
-+syscall sys_getpgrp nr 65 nbargs 0 types: () args: ()
-+syscall sys_setsid nr 66 nbargs 0 types: () args: ()
-+syscall sys_sgetmask nr 68 nbargs 0 types: () args: ()
-+syscall sys_ssetmask nr 69 nbargs 1 types: (int) args: (newmask)
-+syscall sys_setreuid16 nr 70 nbargs 2 types: (old_uid_t, old_uid_t) args: (ruid, euid)
-+syscall sys_setregid16 nr 71 nbargs 2 types: (old_gid_t, old_gid_t) args: (rgid, egid)
-+syscall sys_sigpending nr 73 nbargs 1 types: (old_sigset_t *) args: (set)
-+syscall sys_sethostname nr 74 nbargs 2 types: (char *, int) args: (name, len)
-+syscall sys_setrlimit nr 75 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
-+syscall sys_old_getrlimit nr 76 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
-+syscall sys_getrusage nr 77 nbargs 2 types: (int, struct rusage *) args: (who, ru)
-+syscall sys_gettimeofday nr 78 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
-+syscall sys_settimeofday nr 79 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
-+syscall sys_getgroups16 nr 80 nbargs 2 types: (int, old_gid_t *) args: (gidsetsize, grouplist)
-+syscall sys_setgroups16 nr 81 nbargs 2 types: (int, old_gid_t *) args: (gidsetsize, grouplist)
-+syscall sys_old_select nr 82 nbargs 1 types: (struct sel_arg_struct *) args: (arg)
-+syscall sys_symlink nr 83 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
-+syscall sys_lstat nr 84 nbargs 2 types: (const char *, struct __old_kernel_stat *) args: (filename, statbuf)
-+syscall sys_readlink nr 85 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
-+syscall sys_uselib nr 86 nbargs 1 types: (const char *) args: (library)
-+syscall sys_swapon nr 87 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
-+syscall sys_reboot nr 88 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
-+syscall sys_old_readdir nr 89 nbargs 3 types: (unsigned int, struct old_linux_dirent *, unsigned int) args: (fd, dirent, count)
-+syscall sys_old_mmap nr 90 nbargs 1 types: (struct mmap_arg_struct *) args: (arg)
-+syscall sys_munmap nr 91 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
-+syscall sys_truncate nr 92 nbargs 2 types: (const char *, long) args: (path, length)
-+syscall sys_ftruncate nr 93 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
-+syscall sys_fchmod nr 94 nbargs 2 types: (unsigned int, mode_t) args: (fd, mode)
-+syscall sys_fchown16 nr 95 nbargs 3 types: (unsigned int, old_uid_t, old_gid_t) args: (fd, user, group)
-+syscall sys_getpriority nr 96 nbargs 2 types: (int, int) args: (which, who)
-+syscall sys_setpriority nr 97 nbargs 3 types: (int, int, int) args: (which, who, niceval)
-+syscall sys_statfs nr 99 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
-+syscall sys_fstatfs nr 100 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
-+syscall sys_socketcall nr 102 nbargs 2 types: (int, unsigned long *) args: (call, args)
-+syscall sys_syslog nr 103 nbargs 3 types: (int, char *, int) args: (type, buf, len)
-+syscall sys_setitimer nr 104 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
-+syscall sys_getitimer nr 105 nbargs 2 types: (int, struct itimerval *) args: (which, value)
-+syscall sys_newstat nr 106 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
-+syscall sys_newlstat nr 107 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
-+syscall sys_newfstat nr 108 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
-+syscall sys_uname nr 109 nbargs 1 types: (struct old_utsname *) args: (name)
-+syscall sys_vhangup nr 111 nbargs 0 types: () args: ()
-+syscall sys_wait4 nr 114 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
-+syscall sys_swapoff nr 115 nbargs 1 types: (const char *) args: (specialfile)
-+syscall sys_sysinfo nr 116 nbargs 1 types: (struct sysinfo *) args: (info)
-+syscall sys_ipc nr 117 nbargs 6 types: (unsigned int, int, unsigned long, unsigned long, void *, long) args: (call, first, second, third, ptr, fifth)
-+syscall sys_fsync nr 118 nbargs 1 types: (unsigned int) args: (fd)
-+syscall sys_setdomainname nr 121 nbargs 2 types: (char *, int) args: (name, len)
-+syscall sys_newuname nr 122 nbargs 1 types: (struct new_utsname *) args: (name)
-+syscall sys_adjtimex nr 124 nbargs 1 types: (struct timex *) args: (txc_p)
-+syscall sys_mprotect nr 125 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
-+syscall sys_sigprocmask nr 126 nbargs 3 types: (int, old_sigset_t *, old_sigset_t *) args: (how, nset, oset)
-+syscall sys_init_module nr 128 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
-+syscall sys_delete_module nr 129 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
-+syscall sys_quotactl nr 131 nbargs 4 types: (unsigned int, const char *, qid_t, void *) args: (cmd, special, id, addr)
-+syscall sys_getpgid nr 132 nbargs 1 types: (pid_t) args: (pid)
-+syscall sys_fchdir nr 133 nbargs 1 types: (unsigned int) args: (fd)
-+syscall sys_bdflush nr 134 nbargs 2 types: (int, long) args: (func, data)
-+syscall sys_sysfs nr 135 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
-+syscall sys_personality nr 136 nbargs 1 types: (unsigned int) args: (personality)
-+syscall sys_setfsuid16 nr 138 nbargs 1 types: (old_uid_t) args: (uid)
-+syscall sys_setfsgid16 nr 139 nbargs 1 types: (old_gid_t) args: (gid)
-+syscall sys_llseek nr 140 nbargs 5 types: (unsigned int, unsigned long, unsigned long, loff_t *, unsigned int) args: (fd, offset_high, offset_low, result, origin)
-+syscall sys_getdents nr 141 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
-+syscall sys_select nr 142 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
-+syscall sys_flock nr 143 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
-+syscall sys_msync nr 144 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
-+syscall sys_readv nr 145 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
-+syscall sys_writev nr 146 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
-+syscall sys_getsid nr 147 nbargs 1 types: (pid_t) args: (pid)
-+syscall sys_fdatasync nr 148 nbargs 1 types: (unsigned int) args: (fd)
-+syscall sys_sysctl nr 149 nbargs 1 types: (struct __sysctl_args *) args: (args)
-+syscall sys_mlock nr 150 nbargs 2 types: (unsigned long, size_t) args: (start, len)
-+syscall sys_munlock nr 151 nbargs 2 types: (unsigned long, size_t) args: (start, len)
-+syscall sys_mlockall nr 152 nbargs 1 types: (int) args: (flags)
-+syscall sys_munlockall nr 153 nbargs 0 types: () args: ()
-+syscall sys_sched_setparam nr 154 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
-+syscall sys_sched_getparam nr 155 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
-+syscall sys_sched_setscheduler nr 156 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
-+syscall sys_sched_getscheduler nr 157 nbargs 1 types: (pid_t) args: (pid)
-+syscall sys_sched_yield nr 158 nbargs 0 types: () args: ()
-+syscall sys_sched_get_priority_max nr 159 nbargs 1 types: (int) args: (policy)
-+syscall sys_sched_get_priority_min nr 160 nbargs 1 types: (int) args: (policy)
-+syscall sys_sched_rr_get_interval nr 161 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
-+syscall sys_nanosleep nr 162 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
-+syscall sys_mremap nr 163 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
-+syscall sys_setresuid16 nr 164 nbargs 3 types: (old_uid_t, old_uid_t, old_uid_t) args: (ruid, euid, suid)
-+syscall sys_getresuid16 nr 165 nbargs 3 types: (old_uid_t *, old_uid_t *, old_uid_t *) args: (ruid, euid, suid)
-+syscall sys_poll nr 168 nbargs 3 types: (struct pollfd *, unsigned int, long) args: (ufds, nfds, timeout_msecs)
-+syscall sys_setresgid16 nr 170 nbargs 3 types: (old_gid_t, old_gid_t, old_gid_t) args: (rgid, egid, sgid)
-+syscall sys_getresgid16 nr 171 nbargs 3 types: (old_gid_t *, old_gid_t *, old_gid_t *) args: (rgid, egid, sgid)
-+syscall sys_prctl nr 172 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
-+syscall sys_rt_sigaction nr 174 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
-+syscall sys_rt_sigprocmask nr 175 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
-+syscall sys_rt_sigpending nr 176 nbargs 2 types: (sigset_t *, size_t) args: (set, sigsetsize)
-+syscall sys_rt_sigtimedwait nr 177 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
-+syscall sys_rt_sigqueueinfo nr 178 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
-+syscall sys_rt_sigsuspend nr 179 nbargs 2 types: (sigset_t *, size_t) args: (unewset, sigsetsize)
-+syscall sys_chown16 nr 182 nbargs 3 types: (const char *, old_uid_t, old_gid_t) args: (filename, user, group)
-+syscall sys_getcwd nr 183 nbargs 2 types: (char *, unsigned long) args: (buf, size)
-+syscall sys_capget nr 184 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
-+syscall sys_capset nr 185 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
-+syscall sys_sendfile nr 187 nbargs 4 types: (int, int, off_t *, size_t) args: (out_fd, in_fd, offset, count)
-+syscall sys_getrlimit nr 191 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
-+syscall sys_mmap_pgoff nr 192 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, len, prot, flags, fd, pgoff)
-+syscall sys_stat64 nr 195 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
-+syscall sys_lstat64 nr 196 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
-+syscall sys_fstat64 nr 197 nbargs 2 types: (unsigned long, struct stat64 *) args: (fd, statbuf)
-+syscall sys_lchown nr 198 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
-+syscall sys_getuid nr 199 nbargs 0 types: () args: ()
-+syscall sys_getgid nr 200 nbargs 0 types: () args: ()
-+syscall sys_geteuid nr 201 nbargs 0 types: () args: ()
-+syscall sys_getegid nr 202 nbargs 0 types: () args: ()
-+syscall sys_setreuid nr 203 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
-+syscall sys_setregid nr 204 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
-+syscall sys_getgroups nr 205 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
-+syscall sys_setgroups nr 206 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
-+syscall sys_fchown nr 207 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
-+syscall sys_setresuid nr 208 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
-+syscall sys_getresuid nr 209 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruid, euid, suid)
-+syscall sys_setresgid nr 210 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
-+syscall sys_getresgid nr 211 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgid, egid, sgid)
-+syscall sys_chown nr 212 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
-+syscall sys_setuid nr 213 nbargs 1 types: (uid_t) args: (uid)
-+syscall sys_setgid nr 214 nbargs 1 types: (gid_t) args: (gid)
-+syscall sys_setfsuid nr 215 nbargs 1 types: (uid_t) args: (uid)
-+syscall sys_setfsgid nr 216 nbargs 1 types: (gid_t) args: (gid)
-+syscall sys_pivot_root nr 217 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
-+syscall sys_mincore nr 218 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
-+syscall sys_madvise nr 219 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
-+syscall sys_getdents64 nr 220 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
-+syscall sys_fcntl64 nr 221 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
-+syscall sys_gettid nr 224 nbargs 0 types: () args: ()
-+syscall sys_setxattr nr 226 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
-+syscall sys_lsetxattr nr 227 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
-+syscall sys_fsetxattr nr 228 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
-+syscall sys_getxattr nr 229 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
-+syscall sys_lgetxattr nr 230 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
-+syscall sys_fgetxattr nr 231 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
-+syscall sys_listxattr nr 232 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
-+syscall sys_llistxattr nr 233 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
-+syscall sys_flistxattr nr 234 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
-+syscall sys_removexattr nr 235 nbargs 2 types: (const char *, const char *) args: (pathname, name)
-+syscall sys_lremovexattr nr 236 nbargs 2 types: (const char *, const char *) args: (pathname, name)
-+syscall sys_fremovexattr nr 237 nbargs 2 types: (int, const char *) args: (fd, name)
-+syscall sys_tkill nr 238 nbargs 2 types: (pid_t, int) args: (pid, sig)
-+syscall sys_sendfile64 nr 239 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
-+syscall sys_futex nr 240 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
-+syscall sys_sched_setaffinity nr 241 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
-+syscall sys_sched_getaffinity nr 242 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
-+syscall sys_io_setup nr 245 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
-+syscall sys_io_destroy nr 246 nbargs 1 types: (aio_context_t) args: (ctx)
-+syscall sys_io_getevents nr 247 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
-+syscall sys_io_submit nr 248 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
-+syscall sys_io_cancel nr 249 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
-+syscall sys_exit_group nr 252 nbargs 1 types: (int) args: (error_code)
-+syscall sys_epoll_create nr 254 nbargs 1 types: (int) args: (size)
-+syscall sys_epoll_ctl nr 255 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
-+syscall sys_epoll_wait nr 256 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
-+syscall sys_remap_file_pages nr 257 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
-+syscall sys_set_tid_address nr 258 nbargs 1 types: (int *) args: (tidptr)
-+syscall sys_timer_create nr 259 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
-+syscall sys_timer_settime nr 260 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
-+syscall sys_timer_gettime nr 261 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
-+syscall sys_timer_getoverrun nr 262 nbargs 1 types: (timer_t) args: (timer_id)
-+syscall sys_timer_delete nr 263 nbargs 1 types: (timer_t) args: (timer_id)
-+syscall sys_clock_settime nr 264 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
-+syscall sys_clock_gettime nr 265 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
-+syscall sys_clock_getres nr 266 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
-+syscall sys_clock_nanosleep nr 267 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
-+syscall sys_statfs64 nr 268 nbargs 3 types: (const char *, size_t, struct statfs64 *) args: (pathname, sz, buf)
-+syscall sys_fstatfs64 nr 269 nbargs 3 types: (unsigned int, size_t, struct statfs64 *) args: (fd, sz, buf)
-+syscall sys_tgkill nr 270 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
-+syscall sys_utimes nr 271 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
-+syscall sys_mq_open nr 277 nbargs 4 types: (const char *, int, mode_t, struct mq_attr *) args: (u_name, oflag, mode, u_attr)
-+syscall sys_mq_unlink nr 278 nbargs 1 types: (const char *) args: (u_name)
-+syscall sys_mq_timedsend nr 279 nbargs 5 types: (mqd_t, const char *, size_t, unsigned int, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout)
-+syscall sys_mq_timedreceive nr 280 nbargs 5 types: (mqd_t, char *, size_t, unsigned int *, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout)
-+syscall sys_mq_notify nr 281 nbargs 2 types: (mqd_t, const struct sigevent *) args: (mqdes, u_notification)
-+syscall sys_mq_getsetattr nr 282 nbargs 3 types: (mqd_t, const struct mq_attr *, struct mq_attr *) args: (mqdes, u_mqstat, u_omqstat)
-+syscall sys_kexec_load nr 283 nbargs 4 types: (unsigned long, unsigned long, struct kexec_segment *, unsigned long) args: (entry, nr_segments, segments, flags)
-+syscall sys_waitid nr 284 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
-+syscall sys_add_key nr 286 nbargs 5 types: (const char *, const char *, const void *, size_t, key_serial_t) args: (_type, _description, _payload, plen, ringid)
-+syscall sys_request_key nr 287 nbargs 4 types: (const char *, const char *, const char *, key_serial_t) args: (_type, _description, _callout_info, destringid)
-+syscall sys_keyctl nr 288 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
-+syscall sys_ioprio_set nr 289 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
-+syscall sys_ioprio_get nr 290 nbargs 2 types: (int, int) args: (which, who)
-+syscall sys_inotify_init nr 291 nbargs 0 types: () args: ()
-+syscall sys_inotify_add_watch nr 292 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
-+syscall sys_inotify_rm_watch nr 293 nbargs 2 types: (int, __s32) args: (fd, wd)
-+syscall sys_openat nr 295 nbargs 4 types: (int, const char *, int, int) args: (dfd, filename, flags, mode)
-+syscall sys_mkdirat nr 296 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, mode)
-+syscall sys_mknodat nr 297 nbargs 4 types: (int, const char *, int, unsigned) args: (dfd, filename, mode, dev)
-+syscall sys_fchownat nr 298 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
-+syscall sys_futimesat nr 299 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
-+syscall sys_fstatat64 nr 300 nbargs 4 types: (int, const char *, struct stat64 *, int) args: (dfd, filename, statbuf, flag)
-+syscall sys_unlinkat nr 301 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
-+syscall sys_renameat nr 302 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
-+syscall sys_linkat nr 303 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
-+syscall sys_symlinkat nr 304 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
-+syscall sys_readlinkat nr 305 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
-+syscall sys_fchmodat nr 306 nbargs 3 types: (int, const char *, mode_t) args: (dfd, filename, mode)
-+syscall sys_faccessat nr 307 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
-+syscall sys_pselect6 nr 308 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
-+syscall sys_ppoll nr 309 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
-+syscall sys_unshare nr 310 nbargs 1 types: (unsigned long) args: (unshare_flags)
-+syscall sys_set_robust_list nr 311 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
-+syscall sys_get_robust_list nr 312 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
-+syscall sys_splice nr 313 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
-+syscall sys_tee nr 315 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
-+syscall sys_vmsplice nr 316 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
-+syscall sys_getcpu nr 318 nbargs 3 types: (unsigned *, unsigned *, struct getcpu_cache *) args: (cpup, nodep, unused)
-+syscall sys_epoll_pwait nr 319 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
-+syscall sys_utimensat nr 320 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
-+syscall sys_signalfd nr 321 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
-+syscall sys_timerfd_create nr 322 nbargs 2 types: (int, int) args: (clockid, flags)
-+syscall sys_eventfd nr 323 nbargs 1 types: (unsigned int) args: (count)
-+syscall sys_timerfd_settime nr 325 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
-+syscall sys_timerfd_gettime nr 326 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
-+syscall sys_signalfd4 nr 327 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
-+syscall sys_eventfd2 nr 328 nbargs 2 types: (unsigned int, int) args: (count, flags)
-+syscall sys_epoll_create1 nr 329 nbargs 1 types: (int) args: (flags)
-+syscall sys_dup3 nr 330 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
-+syscall sys_pipe2 nr 331 nbargs 2 types: (int *, int) args: (fildes, flags)
-+syscall sys_inotify_init1 nr 332 nbargs 1 types: (int) args: (flags)
-+syscall sys_preadv nr 333 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
-+syscall sys_pwritev nr 334 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
-+syscall sys_rt_tgsigqueueinfo nr 335 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
-+syscall sys_perf_event_open nr 336 nbargs 5 types: (struct perf_event_attr *, pid_t, int, int, unsigned long) args: (attr_uptr, pid, cpu, group_fd, flags)
-+syscall sys_recvmmsg nr 337 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
-+syscall sys_fanotify_init nr 338 nbargs 2 types: (unsigned int, unsigned int) args: (flags, event_f_flags)
-+syscall sys_prlimit64 nr 340 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
-+syscall sys_clock_adjtime nr 343 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
-+syscall sys_syncfs nr 344 nbargs 1 types: (int) args: (fd)
-+syscall sys_sendmmsg nr 345 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
-+syscall sys_setns nr 346 nbargs 2 types: (int, int) args: (fd, nstype)
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/README b/drivers/staging/lttng/instrumentation/syscalls/README
-new file mode 100644
-index 0000000..6c235e1
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/README
-@@ -0,0 +1,18 @@
-+LTTng system call tracing
-+
-+1) lttng-syscall-extractor
-+
-+You need to build a kernel with CONFIG_FTRACE_SYSCALLS=y and
-+CONFIG_KALLSYMS_ALL=y for extraction. Apply the linker patch to get your
-+kernel to keep the system call metadata after boot. Then build and load
-+the LTTng syscall extractor module. The module will fail to load (this
-+is expected). See the dmesg output for system call metadata.
-+
-+2) Generate system call TRACE_EVENT().
-+
-+Take the system call metadata from dmesg and feed it to
-+lttng-syscalls-generate-headers.sh, e.g. from the instrumentation/syscalls
-+directory. See the script header for a usage example.
-+
-+After these headers are created, only newly added system calls need to be
-+tracked; the whole set never has to be regenerated, since system calls are only ever appended.
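As a rough sketch of the extraction workflow described in the README above (the
module file name and the output file are assumptions for illustration; the
script's header comment remains the authoritative reference for its invocation):

    # kernel built with CONFIG_FTRACE_SYSCALLS=y and CONFIG_KALLSYMS_ALL=y,
    # linker patch applied so the syscall metadata is kept after boot
    insmod ./lttng-syscalls-extractor.ko || true   # the load is expected to fail
    dmesg > syscall-metadata.txt                   # metadata is printed to the kernel log
    cd instrumentation/syscalls
    # feed syscall-metadata.txt to lttng-syscalls-generate-headers.sh,
    # invoking it as documented in the script's header comment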
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_integers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_integers.h
-new file mode 100644
-index 0000000..dabc4bf
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_integers.h
-@@ -0,0 +1,3 @@
-+#ifdef CONFIG_X86_64
-+#include "x86-32-syscalls-3.1.0-rc6_integers.h"
-+#endif
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_pointers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_pointers.h
-new file mode 100644
-index 0000000..a84423c
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_pointers.h
-@@ -0,0 +1,3 @@
-+#ifdef CONFIG_X86_64
-+#include "x86-32-syscalls-3.1.0-rc6_pointers.h"
-+#endif
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers.h
-new file mode 100644
-index 0000000..41db916
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers.h
-@@ -0,0 +1,7 @@
-+#ifdef CONFIG_X86_64
-+#include "x86-64-syscalls-3.0.4_integers.h"
-+#endif
-+
-+#ifdef CONFIG_X86_32
-+#include "x86-32-syscalls-3.1.0-rc6_integers.h"
-+#endif
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers_override.h
-new file mode 100644
-index 0000000..276d9a6
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers_override.h
-@@ -0,0 +1,14 @@
-+#define OVERRIDE_32_sys_mmap
-+#define OVERRIDE_64_sys_mmap
-+
-+#ifndef CREATE_SYSCALL_TABLE
-+
-+SC_TRACE_EVENT(sys_mmap,
-+ TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off),
-+ TP_ARGS(addr, len, prot, flags, fd, off),
-+ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len) __field(int, prot) __field(int, flags) __field(int, fd) __field(off_t, offset)),
-+ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(offset, off)),
-+ TP_printk()
-+)
-+
-+#endif /* CREATE_SYSCALL_TABLE */
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers.h
-new file mode 100644
-index 0000000..3223890
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers.h
-@@ -0,0 +1,7 @@
-+#ifdef CONFIG_X86_64
-+#include "x86-64-syscalls-3.0.4_pointers.h"
-+#endif
-+
-+#ifdef CONFIG_X86_32
-+#include "x86-32-syscalls-3.1.0-rc6_pointers.h"
-+#endif
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h
-new file mode 100644
-index 0000000..e464a4e
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h
-@@ -0,0 +1,4 @@
-+/*
-+ * This is a place-holder for override defines for system calls with
-+ * pointers (all architectures).
-+ */
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_unknown.h b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_unknown.h
-new file mode 100644
-index 0000000..4582d03
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_unknown.h
-@@ -0,0 +1,55 @@
-+#if !defined(_TRACE_SYSCALLS_UNKNOWN_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_SYSCALLS_UNKNOWN_H
-+
-+#include <linux/tracepoint.h>
-+#include <linux/syscalls.h>
-+
-+#define UNKNOWN_SYSCALL_NRARGS 6
-+
-+TRACE_EVENT(sys_unknown,
-+ TP_PROTO(unsigned int id, unsigned long *args),
-+ TP_ARGS(id, args),
-+ TP_STRUCT__entry(
-+ __field(unsigned int, id)
-+ __array(unsigned long, args, UNKNOWN_SYSCALL_NRARGS)
-+ ),
-+ TP_fast_assign(
-+ tp_assign(id, id)
-+ tp_memcpy(args, args, UNKNOWN_SYSCALL_NRARGS * sizeof(*args))
-+ ),
-+ TP_printk()
-+)
-+TRACE_EVENT(compat_sys_unknown,
-+ TP_PROTO(unsigned int id, unsigned long *args),
-+ TP_ARGS(id, args),
-+ TP_STRUCT__entry(
-+ __field(unsigned int, id)
-+ __array(unsigned long, args, UNKNOWN_SYSCALL_NRARGS)
-+ ),
-+ TP_fast_assign(
-+ tp_assign(id, id)
-+ tp_memcpy(args, args, UNKNOWN_SYSCALL_NRARGS * sizeof(*args))
-+ ),
-+ TP_printk()
-+)
-+/*
-+ * This is going to hook on sys_exit in the kernel.
-+ * We change the name so we don't clash with the sys_exit syscall entry
-+ * event.
-+ */
-+TRACE_EVENT(exit_syscall,
-+ TP_PROTO(struct pt_regs *regs, long ret),
-+ TP_ARGS(regs, ret),
-+ TP_STRUCT__entry(
-+ __field(long, ret)
-+ ),
-+ TP_fast_assign(
-+ tp_assign(ret, ret)
-+ ),
-+ TP_printk()
-+)
-+
-+#endif /* _TRACE_SYSCALLS_UNKNOWN_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers.h
-new file mode 100644
-index 0000000..f4ee16c
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers.h
-@@ -0,0 +1,1163 @@
-+/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
-+#ifndef CREATE_SYSCALL_TABLE
-+
-+#if !defined(_TRACE_SYSCALLS_INTEGERS_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_SYSCALLS_INTEGERS_H
-+
-+#include <linux/tracepoint.h>
-+#include <linux/syscalls.h>
-+#include "x86-32-syscalls-3.1.0-rc6_integers_override.h"
-+#include "syscalls_integers_override.h"
-+
-+SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,
-+ TP_STRUCT__entry(),
-+ TP_fast_assign(),
-+ TP_printk()
-+)
-+#ifndef OVERRIDE_32_sys_restart_syscall
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_restart_syscall)
-+#endif
-+#ifndef OVERRIDE_32_sys_getpid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpid)
-+#endif
-+#ifndef OVERRIDE_32_sys_getuid16
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid16)
-+#endif
-+#ifndef OVERRIDE_32_sys_pause
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_pause)
-+#endif
-+#ifndef OVERRIDE_32_sys_sync
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sync)
-+#endif
-+#ifndef OVERRIDE_32_sys_getgid16
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid16)
-+#endif
-+#ifndef OVERRIDE_32_sys_geteuid16
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid16)
-+#endif
-+#ifndef OVERRIDE_32_sys_getegid16
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid16)
-+#endif
-+#ifndef OVERRIDE_32_sys_getppid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getppid)
-+#endif
-+#ifndef OVERRIDE_32_sys_getpgrp
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpgrp)
-+#endif
-+#ifndef OVERRIDE_32_sys_setsid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_setsid)
-+#endif
-+#ifndef OVERRIDE_32_sys_sgetmask
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sgetmask)
-+#endif
-+#ifndef OVERRIDE_32_sys_vhangup
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_vhangup)
-+#endif
-+#ifndef OVERRIDE_32_sys_munlockall
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_munlockall)
-+#endif
-+#ifndef OVERRIDE_32_sys_sched_yield
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sched_yield)
-+#endif
-+#ifndef OVERRIDE_32_sys_getuid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid)
-+#endif
-+#ifndef OVERRIDE_32_sys_getgid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid)
-+#endif
-+#ifndef OVERRIDE_32_sys_geteuid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid)
-+#endif
-+#ifndef OVERRIDE_32_sys_getegid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid)
-+#endif
-+#ifndef OVERRIDE_32_sys_gettid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_gettid)
-+#endif
-+#ifndef OVERRIDE_32_sys_inotify_init
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_inotify_init)
-+#endif
-+#ifndef OVERRIDE_32_sys_exit
-+SC_TRACE_EVENT(sys_exit,
-+ TP_PROTO(int error_code),
-+ TP_ARGS(error_code),
-+ TP_STRUCT__entry(__field(int, error_code)),
-+ TP_fast_assign(tp_assign(error_code, error_code)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_close
-+SC_TRACE_EVENT(sys_close,
-+ TP_PROTO(unsigned int fd),
-+ TP_ARGS(fd),
-+ TP_STRUCT__entry(__field(unsigned int, fd)),
-+ TP_fast_assign(tp_assign(fd, fd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setuid16
-+SC_TRACE_EVENT(sys_setuid16,
-+ TP_PROTO(old_uid_t uid),
-+ TP_ARGS(uid),
-+ TP_STRUCT__entry(__field(old_uid_t, uid)),
-+ TP_fast_assign(tp_assign(uid, uid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_alarm
-+SC_TRACE_EVENT(sys_alarm,
-+ TP_PROTO(unsigned int seconds),
-+ TP_ARGS(seconds),
-+ TP_STRUCT__entry(__field(unsigned int, seconds)),
-+ TP_fast_assign(tp_assign(seconds, seconds)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_nice
-+SC_TRACE_EVENT(sys_nice,
-+ TP_PROTO(int increment),
-+ TP_ARGS(increment),
-+ TP_STRUCT__entry(__field(int, increment)),
-+ TP_fast_assign(tp_assign(increment, increment)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_dup
-+SC_TRACE_EVENT(sys_dup,
-+ TP_PROTO(unsigned int fildes),
-+ TP_ARGS(fildes),
-+ TP_STRUCT__entry(__field(unsigned int, fildes)),
-+ TP_fast_assign(tp_assign(fildes, fildes)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_brk
-+SC_TRACE_EVENT(sys_brk,
-+ TP_PROTO(unsigned long brk),
-+ TP_ARGS(brk),
-+ TP_STRUCT__entry(__field(unsigned long, brk)),
-+ TP_fast_assign(tp_assign(brk, brk)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setgid16
-+SC_TRACE_EVENT(sys_setgid16,
-+ TP_PROTO(old_gid_t gid),
-+ TP_ARGS(gid),
-+ TP_STRUCT__entry(__field(old_gid_t, gid)),
-+ TP_fast_assign(tp_assign(gid, gid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_umask
-+SC_TRACE_EVENT(sys_umask,
-+ TP_PROTO(int mask),
-+ TP_ARGS(mask),
-+ TP_STRUCT__entry(__field(int, mask)),
-+ TP_fast_assign(tp_assign(mask, mask)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_ssetmask
-+SC_TRACE_EVENT(sys_ssetmask,
-+ TP_PROTO(int newmask),
-+ TP_ARGS(newmask),
-+ TP_STRUCT__entry(__field(int, newmask)),
-+ TP_fast_assign(tp_assign(newmask, newmask)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fsync
-+SC_TRACE_EVENT(sys_fsync,
-+ TP_PROTO(unsigned int fd),
-+ TP_ARGS(fd),
-+ TP_STRUCT__entry(__field(unsigned int, fd)),
-+ TP_fast_assign(tp_assign(fd, fd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getpgid
-+SC_TRACE_EVENT(sys_getpgid,
-+ TP_PROTO(pid_t pid),
-+ TP_ARGS(pid),
-+ TP_STRUCT__entry(__field(pid_t, pid)),
-+ TP_fast_assign(tp_assign(pid, pid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fchdir
-+SC_TRACE_EVENT(sys_fchdir,
-+ TP_PROTO(unsigned int fd),
-+ TP_ARGS(fd),
-+ TP_STRUCT__entry(__field(unsigned int, fd)),
-+ TP_fast_assign(tp_assign(fd, fd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_personality
-+SC_TRACE_EVENT(sys_personality,
-+ TP_PROTO(unsigned int personality),
-+ TP_ARGS(personality),
-+ TP_STRUCT__entry(__field(unsigned int, personality)),
-+ TP_fast_assign(tp_assign(personality, personality)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setfsuid16
-+SC_TRACE_EVENT(sys_setfsuid16,
-+ TP_PROTO(old_uid_t uid),
-+ TP_ARGS(uid),
-+ TP_STRUCT__entry(__field(old_uid_t, uid)),
-+ TP_fast_assign(tp_assign(uid, uid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setfsgid16
-+SC_TRACE_EVENT(sys_setfsgid16,
-+ TP_PROTO(old_gid_t gid),
-+ TP_ARGS(gid),
-+ TP_STRUCT__entry(__field(old_gid_t, gid)),
-+ TP_fast_assign(tp_assign(gid, gid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getsid
-+SC_TRACE_EVENT(sys_getsid,
-+ TP_PROTO(pid_t pid),
-+ TP_ARGS(pid),
-+ TP_STRUCT__entry(__field(pid_t, pid)),
-+ TP_fast_assign(tp_assign(pid, pid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fdatasync
-+SC_TRACE_EVENT(sys_fdatasync,
-+ TP_PROTO(unsigned int fd),
-+ TP_ARGS(fd),
-+ TP_STRUCT__entry(__field(unsigned int, fd)),
-+ TP_fast_assign(tp_assign(fd, fd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mlockall
-+SC_TRACE_EVENT(sys_mlockall,
-+ TP_PROTO(int flags),
-+ TP_ARGS(flags),
-+ TP_STRUCT__entry(__field(int, flags)),
-+ TP_fast_assign(tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sched_getscheduler
-+SC_TRACE_EVENT(sys_sched_getscheduler,
-+ TP_PROTO(pid_t pid),
-+ TP_ARGS(pid),
-+ TP_STRUCT__entry(__field(pid_t, pid)),
-+ TP_fast_assign(tp_assign(pid, pid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sched_get_priority_max
-+SC_TRACE_EVENT(sys_sched_get_priority_max,
-+ TP_PROTO(int policy),
-+ TP_ARGS(policy),
-+ TP_STRUCT__entry(__field(int, policy)),
-+ TP_fast_assign(tp_assign(policy, policy)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sched_get_priority_min
-+SC_TRACE_EVENT(sys_sched_get_priority_min,
-+ TP_PROTO(int policy),
-+ TP_ARGS(policy),
-+ TP_STRUCT__entry(__field(int, policy)),
-+ TP_fast_assign(tp_assign(policy, policy)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setuid
-+SC_TRACE_EVENT(sys_setuid,
-+ TP_PROTO(uid_t uid),
-+ TP_ARGS(uid),
-+ TP_STRUCT__entry(__field(uid_t, uid)),
-+ TP_fast_assign(tp_assign(uid, uid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setgid
-+SC_TRACE_EVENT(sys_setgid,
-+ TP_PROTO(gid_t gid),
-+ TP_ARGS(gid),
-+ TP_STRUCT__entry(__field(gid_t, gid)),
-+ TP_fast_assign(tp_assign(gid, gid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setfsuid
-+SC_TRACE_EVENT(sys_setfsuid,
-+ TP_PROTO(uid_t uid),
-+ TP_ARGS(uid),
-+ TP_STRUCT__entry(__field(uid_t, uid)),
-+ TP_fast_assign(tp_assign(uid, uid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setfsgid
-+SC_TRACE_EVENT(sys_setfsgid,
-+ TP_PROTO(gid_t gid),
-+ TP_ARGS(gid),
-+ TP_STRUCT__entry(__field(gid_t, gid)),
-+ TP_fast_assign(tp_assign(gid, gid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_io_destroy
-+SC_TRACE_EVENT(sys_io_destroy,
-+ TP_PROTO(aio_context_t ctx),
-+ TP_ARGS(ctx),
-+ TP_STRUCT__entry(__field(aio_context_t, ctx)),
-+ TP_fast_assign(tp_assign(ctx, ctx)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_exit_group
-+SC_TRACE_EVENT(sys_exit_group,
-+ TP_PROTO(int error_code),
-+ TP_ARGS(error_code),
-+ TP_STRUCT__entry(__field(int, error_code)),
-+ TP_fast_assign(tp_assign(error_code, error_code)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_epoll_create
-+SC_TRACE_EVENT(sys_epoll_create,
-+ TP_PROTO(int size),
-+ TP_ARGS(size),
-+ TP_STRUCT__entry(__field(int, size)),
-+ TP_fast_assign(tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_timer_getoverrun
-+SC_TRACE_EVENT(sys_timer_getoverrun,
-+ TP_PROTO(timer_t timer_id),
-+ TP_ARGS(timer_id),
-+ TP_STRUCT__entry(__field(timer_t, timer_id)),
-+ TP_fast_assign(tp_assign(timer_id, timer_id)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_timer_delete
-+SC_TRACE_EVENT(sys_timer_delete,
-+ TP_PROTO(timer_t timer_id),
-+ TP_ARGS(timer_id),
-+ TP_STRUCT__entry(__field(timer_t, timer_id)),
-+ TP_fast_assign(tp_assign(timer_id, timer_id)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_unshare
-+SC_TRACE_EVENT(sys_unshare,
-+ TP_PROTO(unsigned long unshare_flags),
-+ TP_ARGS(unshare_flags),
-+ TP_STRUCT__entry(__field(unsigned long, unshare_flags)),
-+ TP_fast_assign(tp_assign(unshare_flags, unshare_flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_eventfd
-+SC_TRACE_EVENT(sys_eventfd,
-+ TP_PROTO(unsigned int count),
-+ TP_ARGS(count),
-+ TP_STRUCT__entry(__field(unsigned int, count)),
-+ TP_fast_assign(tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_epoll_create1
-+SC_TRACE_EVENT(sys_epoll_create1,
-+ TP_PROTO(int flags),
-+ TP_ARGS(flags),
-+ TP_STRUCT__entry(__field(int, flags)),
-+ TP_fast_assign(tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_inotify_init1
-+SC_TRACE_EVENT(sys_inotify_init1,
-+ TP_PROTO(int flags),
-+ TP_ARGS(flags),
-+ TP_STRUCT__entry(__field(int, flags)),
-+ TP_fast_assign(tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_syncfs
-+SC_TRACE_EVENT(sys_syncfs,
-+ TP_PROTO(int fd),
-+ TP_ARGS(fd),
-+ TP_STRUCT__entry(__field(int, fd)),
-+ TP_fast_assign(tp_assign(fd, fd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_kill
-+SC_TRACE_EVENT(sys_kill,
-+ TP_PROTO(pid_t pid, int sig),
-+ TP_ARGS(pid, sig),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_signal
-+SC_TRACE_EVENT(sys_signal,
-+ TP_PROTO(int sig, __sighandler_t handler),
-+ TP_ARGS(sig, handler),
-+ TP_STRUCT__entry(__field(int, sig) __field(__sighandler_t, handler)),
-+ TP_fast_assign(tp_assign(sig, sig) tp_assign(handler, handler)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setpgid
-+SC_TRACE_EVENT(sys_setpgid,
-+ TP_PROTO(pid_t pid, pid_t pgid),
-+ TP_ARGS(pid, pgid),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, pgid)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(pgid, pgid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_dup2
-+SC_TRACE_EVENT(sys_dup2,
-+ TP_PROTO(unsigned int oldfd, unsigned int newfd),
-+ TP_ARGS(oldfd, newfd),
-+ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd)),
-+ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setreuid16
-+SC_TRACE_EVENT(sys_setreuid16,
-+ TP_PROTO(old_uid_t ruid, old_uid_t euid),
-+ TP_ARGS(ruid, euid),
-+ TP_STRUCT__entry(__field(old_uid_t, ruid) __field(old_uid_t, euid)),
-+ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setregid16
-+SC_TRACE_EVENT(sys_setregid16,
-+ TP_PROTO(old_gid_t rgid, old_gid_t egid),
-+ TP_ARGS(rgid, egid),
-+ TP_STRUCT__entry(__field(old_gid_t, rgid) __field(old_gid_t, egid)),
-+ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_munmap
-+SC_TRACE_EVENT(sys_munmap,
-+ TP_PROTO(unsigned long addr, size_t len),
-+ TP_ARGS(addr, len),
-+ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len)),
-+ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_ftruncate
-+SC_TRACE_EVENT(sys_ftruncate,
-+ TP_PROTO(unsigned int fd, unsigned long length),
-+ TP_ARGS(fd, length),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, length)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(length, length)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fchmod
-+SC_TRACE_EVENT(sys_fchmod,
-+ TP_PROTO(unsigned int fd, mode_t mode),
-+ TP_ARGS(fd, mode),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(mode_t, mode)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getpriority
-+SC_TRACE_EVENT(sys_getpriority,
-+ TP_PROTO(int which, int who),
-+ TP_ARGS(which, who),
-+ TP_STRUCT__entry(__field(int, which) __field(int, who)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_bdflush
-+SC_TRACE_EVENT(sys_bdflush,
-+ TP_PROTO(int func, long data),
-+ TP_ARGS(func, data),
-+ TP_STRUCT__entry(__field(int, func) __field(long, data)),
-+ TP_fast_assign(tp_assign(func, func) tp_assign(data, data)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_flock
-+SC_TRACE_EVENT(sys_flock,
-+ TP_PROTO(unsigned int fd, unsigned int cmd),
-+ TP_ARGS(fd, cmd),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mlock
-+SC_TRACE_EVENT(sys_mlock,
-+ TP_PROTO(unsigned long start, size_t len),
-+ TP_ARGS(start, len),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_munlock
-+SC_TRACE_EVENT(sys_munlock,
-+ TP_PROTO(unsigned long start, size_t len),
-+ TP_ARGS(start, len),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setreuid
-+SC_TRACE_EVENT(sys_setreuid,
-+ TP_PROTO(uid_t ruid, uid_t euid),
-+ TP_ARGS(ruid, euid),
-+ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid)),
-+ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setregid
-+SC_TRACE_EVENT(sys_setregid,
-+ TP_PROTO(gid_t rgid, gid_t egid),
-+ TP_ARGS(rgid, egid),
-+ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid)),
-+ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_tkill
-+SC_TRACE_EVENT(sys_tkill,
-+ TP_PROTO(pid_t pid, int sig),
-+ TP_ARGS(pid, sig),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_ioprio_get
-+SC_TRACE_EVENT(sys_ioprio_get,
-+ TP_PROTO(int which, int who),
-+ TP_ARGS(which, who),
-+ TP_STRUCT__entry(__field(int, which) __field(int, who)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_inotify_rm_watch
-+SC_TRACE_EVENT(sys_inotify_rm_watch,
-+ TP_PROTO(int fd, __s32 wd),
-+ TP_ARGS(fd, wd),
-+ TP_STRUCT__entry(__field(int, fd) __field(__s32, wd)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(wd, wd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_timerfd_create
-+SC_TRACE_EVENT(sys_timerfd_create,
-+ TP_PROTO(int clockid, int flags),
-+ TP_ARGS(clockid, flags),
-+ TP_STRUCT__entry(__field(int, clockid) __field(int, flags)),
-+ TP_fast_assign(tp_assign(clockid, clockid) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_eventfd2
-+SC_TRACE_EVENT(sys_eventfd2,
-+ TP_PROTO(unsigned int count, int flags),
-+ TP_ARGS(count, flags),
-+ TP_STRUCT__entry(__field(unsigned int, count) __field(int, flags)),
-+ TP_fast_assign(tp_assign(count, count) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fanotify_init
-+SC_TRACE_EVENT(sys_fanotify_init,
-+ TP_PROTO(unsigned int flags, unsigned int event_f_flags),
-+ TP_ARGS(flags, event_f_flags),
-+ TP_STRUCT__entry(__field(unsigned int, flags) __field(unsigned int, event_f_flags)),
-+ TP_fast_assign(tp_assign(flags, flags) tp_assign(event_f_flags, event_f_flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setns
-+SC_TRACE_EVENT(sys_setns,
-+ TP_PROTO(int fd, int nstype),
-+ TP_ARGS(fd, nstype),
-+ TP_STRUCT__entry(__field(int, fd) __field(int, nstype)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(nstype, nstype)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_lseek
-+SC_TRACE_EVENT(sys_lseek,
-+ TP_PROTO(unsigned int fd, off_t offset, unsigned int origin),
-+ TP_ARGS(fd, offset, origin),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(off_t, offset) __field(unsigned int, origin)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(origin, origin)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_ioctl
-+SC_TRACE_EVENT(sys_ioctl,
-+ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
-+ TP_ARGS(fd, cmd, arg),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fcntl
-+SC_TRACE_EVENT(sys_fcntl,
-+ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
-+ TP_ARGS(fd, cmd, arg),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fchown16
-+SC_TRACE_EVENT(sys_fchown16,
-+ TP_PROTO(unsigned int fd, old_uid_t user, old_gid_t group),
-+ TP_ARGS(fd, user, group),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(old_uid_t, user) __field(old_gid_t, group)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setpriority
-+SC_TRACE_EVENT(sys_setpriority,
-+ TP_PROTO(int which, int who, int niceval),
-+ TP_ARGS(which, who, niceval),
-+ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, niceval)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(niceval, niceval)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mprotect
-+SC_TRACE_EVENT(sys_mprotect,
-+ TP_PROTO(unsigned long start, size_t len, unsigned long prot),
-+ TP_ARGS(start, len, prot),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(unsigned long, prot)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(prot, prot)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sysfs
-+SC_TRACE_EVENT(sys_sysfs,
-+ TP_PROTO(int option, unsigned long arg1, unsigned long arg2),
-+ TP_ARGS(option, arg1, arg2),
-+ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg1) __field(unsigned long, arg2)),
-+ TP_fast_assign(tp_assign(option, option) tp_assign(arg1, arg1) tp_assign(arg2, arg2)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_msync
-+SC_TRACE_EVENT(sys_msync,
-+ TP_PROTO(unsigned long start, size_t len, int flags),
-+ TP_ARGS(start, len, flags),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(int, flags)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setresuid16
-+SC_TRACE_EVENT(sys_setresuid16,
-+ TP_PROTO(old_uid_t ruid, old_uid_t euid, old_uid_t suid),
-+ TP_ARGS(ruid, euid, suid),
-+ TP_STRUCT__entry(__field(old_uid_t, ruid) __field(old_uid_t, euid) __field(old_uid_t, suid)),
-+ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setresgid16
-+SC_TRACE_EVENT(sys_setresgid16,
-+ TP_PROTO(old_gid_t rgid, old_gid_t egid, old_gid_t sgid),
-+ TP_ARGS(rgid, egid, sgid),
-+ TP_STRUCT__entry(__field(old_gid_t, rgid) __field(old_gid_t, egid) __field(old_gid_t, sgid)),
-+ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fchown
-+SC_TRACE_EVENT(sys_fchown,
-+ TP_PROTO(unsigned int fd, uid_t user, gid_t group),
-+ TP_ARGS(fd, user, group),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(uid_t, user) __field(gid_t, group)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setresuid
-+SC_TRACE_EVENT(sys_setresuid,
-+ TP_PROTO(uid_t ruid, uid_t euid, uid_t suid),
-+ TP_ARGS(ruid, euid, suid),
-+ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid) __field(uid_t, suid)),
-+ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setresgid
-+SC_TRACE_EVENT(sys_setresgid,
-+ TP_PROTO(gid_t rgid, gid_t egid, gid_t sgid),
-+ TP_ARGS(rgid, egid, sgid),
-+ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid) __field(gid_t, sgid)),
-+ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_madvise
-+SC_TRACE_EVENT(sys_madvise,
-+ TP_PROTO(unsigned long start, size_t len_in, int behavior),
-+ TP_ARGS(start, len_in, behavior),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len_in) __field(int, behavior)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len_in, len_in) tp_assign(behavior, behavior)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fcntl64
-+SC_TRACE_EVENT(sys_fcntl64,
-+ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
-+ TP_ARGS(fd, cmd, arg),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_tgkill
-+SC_TRACE_EVENT(sys_tgkill,
-+ TP_PROTO(pid_t tgid, pid_t pid, int sig),
-+ TP_ARGS(tgid, pid, sig),
-+ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig)),
-+ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_ioprio_set
-+SC_TRACE_EVENT(sys_ioprio_set,
-+ TP_PROTO(int which, int who, int ioprio),
-+ TP_ARGS(which, who, ioprio),
-+ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, ioprio)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(ioprio, ioprio)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_dup3
-+SC_TRACE_EVENT(sys_dup3,
-+ TP_PROTO(unsigned int oldfd, unsigned int newfd, int flags),
-+ TP_ARGS(oldfd, newfd, flags),
-+ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd) __field(int, flags)),
-+ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_ptrace
-+SC_TRACE_EVENT(sys_ptrace,
-+ TP_PROTO(long request, long pid, unsigned long addr, unsigned long data),
-+ TP_ARGS(request, pid, addr, data),
-+ TP_STRUCT__entry(__field(long, request) __field(long, pid) __field_hex(unsigned long, addr) __field(unsigned long, data)),
-+ TP_fast_assign(tp_assign(request, request) tp_assign(pid, pid) tp_assign(addr, addr) tp_assign(data, data)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_tee
-+SC_TRACE_EVENT(sys_tee,
-+ TP_PROTO(int fdin, int fdout, size_t len, unsigned int flags),
-+ TP_ARGS(fdin, fdout, len, flags),
-+ TP_STRUCT__entry(__field(int, fdin) __field(int, fdout) __field(size_t, len) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_assign(fdin, fdin) tp_assign(fdout, fdout) tp_assign(len, len) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mremap
-+SC_TRACE_EVENT(sys_mremap,
-+ TP_PROTO(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr),
-+ TP_ARGS(addr, old_len, new_len, flags, new_addr),
-+ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, old_len) __field(unsigned long, new_len) __field(unsigned long, flags) __field_hex(unsigned long, new_addr)),
-+ TP_fast_assign(tp_assign(addr, addr) tp_assign(old_len, old_len) tp_assign(new_len, new_len) tp_assign(flags, flags) tp_assign(new_addr, new_addr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_prctl
-+SC_TRACE_EVENT(sys_prctl,
-+ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
-+ TP_ARGS(option, arg2, arg3, arg4, arg5),
-+ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
-+ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_remap_file_pages
-+SC_TRACE_EVENT(sys_remap_file_pages,
-+ TP_PROTO(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags),
-+ TP_ARGS(start, size, prot, pgoff, flags),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(unsigned long, size) __field(unsigned long, prot) __field(unsigned long, pgoff) __field(unsigned long, flags)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(size, size) tp_assign(prot, prot) tp_assign(pgoff, pgoff) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_keyctl
-+SC_TRACE_EVENT(sys_keyctl,
-+ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
-+ TP_ARGS(option, arg2, arg3, arg4, arg5),
-+ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
-+ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mmap_pgoff
-+SC_TRACE_EVENT(sys_mmap_pgoff,
-+ TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff),
-+ TP_ARGS(addr, len, prot, flags, fd, pgoff),
-+ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, len) __field(unsigned long, prot) __field(unsigned long, flags) __field(unsigned long, fd) __field(unsigned long, pgoff)),
-+ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(pgoff, pgoff)),
-+ TP_printk()
-+)
-+#endif
-+
-+#endif /* _TRACE_SYSCALLS_INTEGERS_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
-+
-+#else /* CREATE_SYSCALL_TABLE */
-+
-+#include "x86-32-syscalls-3.1.0-rc6_integers_override.h"
-+#include "syscalls_integers_override.h"
-+
-+#ifndef OVERRIDE_TABLE_32_sys_restart_syscall
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_restart_syscall, 0, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getpid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpid, 20, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getuid16
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid16, 24, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_pause
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_pause, 29, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sync
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sync, 36, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getgid16
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid16, 47, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_geteuid16
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid16, 49, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getegid16
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid16, 50, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getppid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getppid, 64, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getpgrp
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpgrp, 65, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setsid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_setsid, 66, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sgetmask
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sgetmask, 68, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_vhangup
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_vhangup, 111, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_munlockall
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_munlockall, 153, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sched_yield
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sched_yield, 158, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getuid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid, 199, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getgid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid, 200, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_geteuid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid, 201, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getegid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid, 202, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_gettid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_gettid, 224, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_inotify_init
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_inotify_init, 291, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_exit
-+TRACE_SYSCALL_TABLE(sys_exit, sys_exit, 1, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_close
-+TRACE_SYSCALL_TABLE(sys_close, sys_close, 6, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_lseek
-+TRACE_SYSCALL_TABLE(sys_lseek, sys_lseek, 19, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setuid16
-+TRACE_SYSCALL_TABLE(sys_setuid16, sys_setuid16, 23, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_ptrace
-+TRACE_SYSCALL_TABLE(sys_ptrace, sys_ptrace, 26, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_alarm
-+TRACE_SYSCALL_TABLE(sys_alarm, sys_alarm, 27, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_nice
-+TRACE_SYSCALL_TABLE(sys_nice, sys_nice, 34, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_kill
-+TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 37, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_dup
-+TRACE_SYSCALL_TABLE(sys_dup, sys_dup, 41, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_brk
-+TRACE_SYSCALL_TABLE(sys_brk, sys_brk, 45, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setgid16
-+TRACE_SYSCALL_TABLE(sys_setgid16, sys_setgid16, 46, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_signal
-+TRACE_SYSCALL_TABLE(sys_signal, sys_signal, 48, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_ioctl
-+TRACE_SYSCALL_TABLE(sys_ioctl, sys_ioctl, 54, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fcntl
-+TRACE_SYSCALL_TABLE(sys_fcntl, sys_fcntl, 55, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setpgid
-+TRACE_SYSCALL_TABLE(sys_setpgid, sys_setpgid, 57, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_umask
-+TRACE_SYSCALL_TABLE(sys_umask, sys_umask, 60, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_dup2
-+TRACE_SYSCALL_TABLE(sys_dup2, sys_dup2, 63, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_ssetmask
-+TRACE_SYSCALL_TABLE(sys_ssetmask, sys_ssetmask, 69, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setreuid16
-+TRACE_SYSCALL_TABLE(sys_setreuid16, sys_setreuid16, 70, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setregid16
-+TRACE_SYSCALL_TABLE(sys_setregid16, sys_setregid16, 71, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_munmap
-+TRACE_SYSCALL_TABLE(sys_munmap, sys_munmap, 91, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_ftruncate
-+TRACE_SYSCALL_TABLE(sys_ftruncate, sys_ftruncate, 93, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fchmod
-+TRACE_SYSCALL_TABLE(sys_fchmod, sys_fchmod, 94, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fchown16
-+TRACE_SYSCALL_TABLE(sys_fchown16, sys_fchown16, 95, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getpriority
-+TRACE_SYSCALL_TABLE(sys_getpriority, sys_getpriority, 96, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setpriority
-+TRACE_SYSCALL_TABLE(sys_setpriority, sys_setpriority, 97, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fsync
-+TRACE_SYSCALL_TABLE(sys_fsync, sys_fsync, 118, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mprotect
-+TRACE_SYSCALL_TABLE(sys_mprotect, sys_mprotect, 125, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getpgid
-+TRACE_SYSCALL_TABLE(sys_getpgid, sys_getpgid, 132, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fchdir
-+TRACE_SYSCALL_TABLE(sys_fchdir, sys_fchdir, 133, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_bdflush
-+TRACE_SYSCALL_TABLE(sys_bdflush, sys_bdflush, 134, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sysfs
-+TRACE_SYSCALL_TABLE(sys_sysfs, sys_sysfs, 135, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_personality
-+TRACE_SYSCALL_TABLE(sys_personality, sys_personality, 136, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setfsuid16
-+TRACE_SYSCALL_TABLE(sys_setfsuid16, sys_setfsuid16, 138, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setfsgid16
-+TRACE_SYSCALL_TABLE(sys_setfsgid16, sys_setfsgid16, 139, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_flock
-+TRACE_SYSCALL_TABLE(sys_flock, sys_flock, 143, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_msync
-+TRACE_SYSCALL_TABLE(sys_msync, sys_msync, 144, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getsid
-+TRACE_SYSCALL_TABLE(sys_getsid, sys_getsid, 147, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fdatasync
-+TRACE_SYSCALL_TABLE(sys_fdatasync, sys_fdatasync, 148, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mlock
-+TRACE_SYSCALL_TABLE(sys_mlock, sys_mlock, 150, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_munlock
-+TRACE_SYSCALL_TABLE(sys_munlock, sys_munlock, 151, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mlockall
-+TRACE_SYSCALL_TABLE(sys_mlockall, sys_mlockall, 152, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sched_getscheduler
-+TRACE_SYSCALL_TABLE(sys_sched_getscheduler, sys_sched_getscheduler, 157, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_max
-+TRACE_SYSCALL_TABLE(sys_sched_get_priority_max, sys_sched_get_priority_max, 159, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_min
-+TRACE_SYSCALL_TABLE(sys_sched_get_priority_min, sys_sched_get_priority_min, 160, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mremap
-+TRACE_SYSCALL_TABLE(sys_mremap, sys_mremap, 163, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setresuid16
-+TRACE_SYSCALL_TABLE(sys_setresuid16, sys_setresuid16, 164, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setresgid16
-+TRACE_SYSCALL_TABLE(sys_setresgid16, sys_setresgid16, 170, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_prctl
-+TRACE_SYSCALL_TABLE(sys_prctl, sys_prctl, 172, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mmap_pgoff
-+TRACE_SYSCALL_TABLE(sys_mmap_pgoff, sys_mmap_pgoff, 192, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setreuid
-+TRACE_SYSCALL_TABLE(sys_setreuid, sys_setreuid, 203, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setregid
-+TRACE_SYSCALL_TABLE(sys_setregid, sys_setregid, 204, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fchown
-+TRACE_SYSCALL_TABLE(sys_fchown, sys_fchown, 207, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setresuid
-+TRACE_SYSCALL_TABLE(sys_setresuid, sys_setresuid, 208, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setresgid
-+TRACE_SYSCALL_TABLE(sys_setresgid, sys_setresgid, 210, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setuid
-+TRACE_SYSCALL_TABLE(sys_setuid, sys_setuid, 213, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setgid
-+TRACE_SYSCALL_TABLE(sys_setgid, sys_setgid, 214, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setfsuid
-+TRACE_SYSCALL_TABLE(sys_setfsuid, sys_setfsuid, 215, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setfsgid
-+TRACE_SYSCALL_TABLE(sys_setfsgid, sys_setfsgid, 216, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_madvise
-+TRACE_SYSCALL_TABLE(sys_madvise, sys_madvise, 219, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fcntl64
-+TRACE_SYSCALL_TABLE(sys_fcntl64, sys_fcntl64, 221, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_tkill
-+TRACE_SYSCALL_TABLE(sys_tkill, sys_tkill, 238, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_io_destroy
-+TRACE_SYSCALL_TABLE(sys_io_destroy, sys_io_destroy, 246, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_exit_group
-+TRACE_SYSCALL_TABLE(sys_exit_group, sys_exit_group, 252, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_epoll_create
-+TRACE_SYSCALL_TABLE(sys_epoll_create, sys_epoll_create, 254, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_remap_file_pages
-+TRACE_SYSCALL_TABLE(sys_remap_file_pages, sys_remap_file_pages, 257, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_timer_getoverrun
-+TRACE_SYSCALL_TABLE(sys_timer_getoverrun, sys_timer_getoverrun, 262, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_timer_delete
-+TRACE_SYSCALL_TABLE(sys_timer_delete, sys_timer_delete, 263, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_tgkill
-+TRACE_SYSCALL_TABLE(sys_tgkill, sys_tgkill, 270, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_keyctl
-+TRACE_SYSCALL_TABLE(sys_keyctl, sys_keyctl, 288, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_ioprio_set
-+TRACE_SYSCALL_TABLE(sys_ioprio_set, sys_ioprio_set, 289, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_ioprio_get
-+TRACE_SYSCALL_TABLE(sys_ioprio_get, sys_ioprio_get, 290, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_inotify_rm_watch
-+TRACE_SYSCALL_TABLE(sys_inotify_rm_watch, sys_inotify_rm_watch, 293, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_unshare
-+TRACE_SYSCALL_TABLE(sys_unshare, sys_unshare, 310, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_tee
-+TRACE_SYSCALL_TABLE(sys_tee, sys_tee, 315, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_timerfd_create
-+TRACE_SYSCALL_TABLE(sys_timerfd_create, sys_timerfd_create, 322, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_eventfd
-+TRACE_SYSCALL_TABLE(sys_eventfd, sys_eventfd, 323, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_eventfd2
-+TRACE_SYSCALL_TABLE(sys_eventfd2, sys_eventfd2, 328, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_epoll_create1
-+TRACE_SYSCALL_TABLE(sys_epoll_create1, sys_epoll_create1, 329, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_dup3
-+TRACE_SYSCALL_TABLE(sys_dup3, sys_dup3, 330, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_inotify_init1
-+TRACE_SYSCALL_TABLE(sys_inotify_init1, sys_inotify_init1, 332, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fanotify_init
-+TRACE_SYSCALL_TABLE(sys_fanotify_init, sys_fanotify_init, 338, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_syncfs
-+TRACE_SYSCALL_TABLE(sys_syncfs, sys_syncfs, 344, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setns
-+TRACE_SYSCALL_TABLE(sys_setns, sys_setns, 346, 2)
-+#endif
-+
-+#endif /* CREATE_SYSCALL_TABLE */
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers_override.h
-new file mode 100644
-index 0000000..ed2cf1f
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers_override.h
-@@ -0,0 +1,38 @@
-+#ifndef CONFIG_UID16
-+
-+#define OVERRIDE_32_sys_getuid16
-+#define OVERRIDE_32_sys_getgid16
-+#define OVERRIDE_32_sys_geteuid16
-+#define OVERRIDE_32_sys_getegid16
-+#define OVERRIDE_32_sys_setuid16
-+#define OVERRIDE_32_sys_setgid16
-+#define OVERRIDE_32_sys_setfsuid16
-+#define OVERRIDE_32_sys_setfsgid16
-+#define OVERRIDE_32_sys_setreuid16
-+#define OVERRIDE_32_sys_setregid16
-+#define OVERRIDE_32_sys_fchown16
-+#define OVERRIDE_32_sys_setresuid16
-+#define OVERRIDE_32_sys_setresgid16
-+
-+#define OVERRIDE_TABLE_32_sys_getuid16
-+#define OVERRIDE_TABLE_32_sys_getgid16
-+#define OVERRIDE_TABLE_32_sys_geteuid16
-+#define OVERRIDE_TABLE_32_sys_getegid16
-+#define OVERRIDE_TABLE_32_sys_setuid16
-+#define OVERRIDE_TABLE_32_sys_setgid16
-+#define OVERRIDE_TABLE_32_sys_setreuid16
-+#define OVERRIDE_TABLE_32_sys_setregid16
-+#define OVERRIDE_TABLE_32_sys_fchown16
-+#define OVERRIDE_TABLE_32_sys_setfsuid16
-+#define OVERRIDE_TABLE_32_sys_setfsgid16
-+#define OVERRIDE_TABLE_32_sys_setresuid16
-+#define OVERRIDE_TABLE_32_sys_setresgid16
-+
-+#endif
-+
-+#ifdef CREATE_SYSCALL_TABLE
-+
-+#define OVERRIDE_TABLE_32_sys_mmap
-+TRACE_SYSCALL_TABLE(sys_mmap, sys_mmap, 90, 6)
-+
-+#endif
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers.h
-new file mode 100644
-index 0000000..ec5b301
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers.h
-@@ -0,0 +1,2232 @@
-+/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
-+#ifndef CREATE_SYSCALL_TABLE
-+
-+#if !defined(_TRACE_SYSCALLS_POINTERS_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_SYSCALLS_POINTERS_H
-+
-+#include <linux/tracepoint.h>
-+#include <linux/syscalls.h>
-+#include "x86-32-syscalls-3.1.0-rc6_pointers_override.h"
-+#include "syscalls_pointers_override.h"
-+
-+#ifndef OVERRIDE_32_sys_unlink
-+SC_TRACE_EVENT(sys_unlink,
-+ TP_PROTO(const char * pathname),
-+ TP_ARGS(pathname),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_chdir
-+SC_TRACE_EVENT(sys_chdir,
-+ TP_PROTO(const char * filename),
-+ TP_ARGS(filename),
-+ TP_STRUCT__entry(__string_from_user(filename, filename)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_time
-+SC_TRACE_EVENT(sys_time,
-+ TP_PROTO(time_t * tloc),
-+ TP_ARGS(tloc),
-+ TP_STRUCT__entry(__field_hex(time_t *, tloc)),
-+ TP_fast_assign(tp_assign(tloc, tloc)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_oldumount
-+SC_TRACE_EVENT(sys_oldumount,
-+ TP_PROTO(char * name),
-+ TP_ARGS(name),
-+ TP_STRUCT__entry(__string_from_user(name, name)),
-+ TP_fast_assign(tp_copy_string_from_user(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_stime
-+SC_TRACE_EVENT(sys_stime,
-+ TP_PROTO(time_t * tptr),
-+ TP_ARGS(tptr),
-+ TP_STRUCT__entry(__field_hex(time_t *, tptr)),
-+ TP_fast_assign(tp_assign(tptr, tptr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_rmdir
-+SC_TRACE_EVENT(sys_rmdir,
-+ TP_PROTO(const char * pathname),
-+ TP_ARGS(pathname),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_pipe
-+SC_TRACE_EVENT(sys_pipe,
-+ TP_PROTO(int * fildes),
-+ TP_ARGS(fildes),
-+ TP_STRUCT__entry(__field_hex(int *, fildes)),
-+ TP_fast_assign(tp_assign(fildes, fildes)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_times
-+SC_TRACE_EVENT(sys_times,
-+ TP_PROTO(struct tms * tbuf),
-+ TP_ARGS(tbuf),
-+ TP_STRUCT__entry(__field_hex(struct tms *, tbuf)),
-+ TP_fast_assign(tp_assign(tbuf, tbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_acct
-+SC_TRACE_EVENT(sys_acct,
-+ TP_PROTO(const char * name),
-+ TP_ARGS(name),
-+ TP_STRUCT__entry(__string_from_user(name, name)),
-+ TP_fast_assign(tp_copy_string_from_user(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_olduname
-+SC_TRACE_EVENT(sys_olduname,
-+ TP_PROTO(struct oldold_utsname * name),
-+ TP_ARGS(name),
-+ TP_STRUCT__entry(__field_hex(struct oldold_utsname *, name)),
-+ TP_fast_assign(tp_assign(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_chroot
-+SC_TRACE_EVENT(sys_chroot,
-+ TP_PROTO(const char * filename),
-+ TP_ARGS(filename),
-+ TP_STRUCT__entry(__string_from_user(filename, filename)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sigpending
-+SC_TRACE_EVENT(sys_sigpending,
-+ TP_PROTO(old_sigset_t * set),
-+ TP_ARGS(set),
-+ TP_STRUCT__entry(__field_hex(old_sigset_t *, set)),
-+ TP_fast_assign(tp_assign(set, set)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_old_select
-+SC_TRACE_EVENT(sys_old_select,
-+ TP_PROTO(struct sel_arg_struct * arg),
-+ TP_ARGS(arg),
-+ TP_STRUCT__entry(__field_hex(struct sel_arg_struct *, arg)),
-+ TP_fast_assign(tp_assign(arg, arg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_uselib
-+SC_TRACE_EVENT(sys_uselib,
-+ TP_PROTO(const char * library),
-+ TP_ARGS(library),
-+ TP_STRUCT__entry(__field_hex(const char *, library)),
-+ TP_fast_assign(tp_assign(library, library)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_old_mmap
-+SC_TRACE_EVENT(sys_old_mmap,
-+ TP_PROTO(struct mmap_arg_struct * arg),
-+ TP_ARGS(arg),
-+ TP_STRUCT__entry(__field_hex(struct mmap_arg_struct *, arg)),
-+ TP_fast_assign(tp_assign(arg, arg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_uname
-+SC_TRACE_EVENT(sys_uname,
-+ TP_PROTO(struct old_utsname * name),
-+ TP_ARGS(name),
-+ TP_STRUCT__entry(__field_hex(struct old_utsname *, name)),
-+ TP_fast_assign(tp_assign(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_swapoff
-+SC_TRACE_EVENT(sys_swapoff,
-+ TP_PROTO(const char * specialfile),
-+ TP_ARGS(specialfile),
-+ TP_STRUCT__entry(__string_from_user(specialfile, specialfile)),
-+ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sysinfo
-+SC_TRACE_EVENT(sys_sysinfo,
-+ TP_PROTO(struct sysinfo * info),
-+ TP_ARGS(info),
-+ TP_STRUCT__entry(__field_hex(struct sysinfo *, info)),
-+ TP_fast_assign(tp_assign(info, info)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_newuname
-+SC_TRACE_EVENT(sys_newuname,
-+ TP_PROTO(struct new_utsname * name),
-+ TP_ARGS(name),
-+ TP_STRUCT__entry(__field_hex(struct new_utsname *, name)),
-+ TP_fast_assign(tp_assign(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_adjtimex
-+SC_TRACE_EVENT(sys_adjtimex,
-+ TP_PROTO(struct timex * txc_p),
-+ TP_ARGS(txc_p),
-+ TP_STRUCT__entry(__field_hex(struct timex *, txc_p)),
-+ TP_fast_assign(tp_assign(txc_p, txc_p)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sysctl
-+SC_TRACE_EVENT(sys_sysctl,
-+ TP_PROTO(struct __sysctl_args * args),
-+ TP_ARGS(args),
-+ TP_STRUCT__entry(__field_hex(struct __sysctl_args *, args)),
-+ TP_fast_assign(tp_assign(args, args)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_set_tid_address
-+SC_TRACE_EVENT(sys_set_tid_address,
-+ TP_PROTO(int * tidptr),
-+ TP_ARGS(tidptr),
-+ TP_STRUCT__entry(__field_hex(int *, tidptr)),
-+ TP_fast_assign(tp_assign(tidptr, tidptr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mq_unlink
-+SC_TRACE_EVENT(sys_mq_unlink,
-+ TP_PROTO(const char * u_name),
-+ TP_ARGS(u_name),
-+ TP_STRUCT__entry(__string_from_user(u_name, u_name)),
-+ TP_fast_assign(tp_copy_string_from_user(u_name, u_name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_creat
-+SC_TRACE_EVENT(sys_creat,
-+ TP_PROTO(const char * pathname, int mode),
-+ TP_ARGS(pathname, mode),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_link
-+SC_TRACE_EVENT(sys_link,
-+ TP_PROTO(const char * oldname, const char * newname),
-+ TP_ARGS(oldname, newname),
-+ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
-+ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_chmod
-+SC_TRACE_EVENT(sys_chmod,
-+ TP_PROTO(const char * filename, mode_t mode),
-+ TP_ARGS(filename, mode),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(mode_t, mode)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_stat
-+SC_TRACE_EVENT(sys_stat,
-+ TP_PROTO(const char * filename, struct __old_kernel_stat * statbuf),
-+ TP_ARGS(filename, statbuf),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct __old_kernel_stat *, statbuf)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fstat
-+SC_TRACE_EVENT(sys_fstat,
-+ TP_PROTO(unsigned int fd, struct __old_kernel_stat * statbuf),
-+ TP_ARGS(fd, statbuf),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct __old_kernel_stat *, statbuf)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_utime
-+SC_TRACE_EVENT(sys_utime,
-+ TP_PROTO(char * filename, struct utimbuf * times),
-+ TP_ARGS(filename, times),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct utimbuf *, times)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(times, times)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_access
-+SC_TRACE_EVENT(sys_access,
-+ TP_PROTO(const char * filename, int mode),
-+ TP_ARGS(filename, mode),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_rename
-+SC_TRACE_EVENT(sys_rename,
-+ TP_PROTO(const char * oldname, const char * newname),
-+ TP_ARGS(oldname, newname),
-+ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
-+ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mkdir
-+SC_TRACE_EVENT(sys_mkdir,
-+ TP_PROTO(const char * pathname, int mode),
-+ TP_ARGS(pathname, mode),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_umount
-+SC_TRACE_EVENT(sys_umount,
-+ TP_PROTO(char * name, int flags),
-+ TP_ARGS(name, flags),
-+ TP_STRUCT__entry(__string_from_user(name, name) __field(int, flags)),
-+ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_ustat
-+SC_TRACE_EVENT(sys_ustat,
-+ TP_PROTO(unsigned dev, struct ustat * ubuf),
-+ TP_ARGS(dev, ubuf),
-+ TP_STRUCT__entry(__field(unsigned, dev) __field_hex(struct ustat *, ubuf)),
-+ TP_fast_assign(tp_assign(dev, dev) tp_assign(ubuf, ubuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sethostname
-+SC_TRACE_EVENT(sys_sethostname,
-+ TP_PROTO(char * name, int len),
-+ TP_ARGS(name, len),
-+ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
-+ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setrlimit
-+SC_TRACE_EVENT(sys_setrlimit,
-+ TP_PROTO(unsigned int resource, struct rlimit * rlim),
-+ TP_ARGS(resource, rlim),
-+ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
-+ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_old_getrlimit
-+SC_TRACE_EVENT(sys_old_getrlimit,
-+ TP_PROTO(unsigned int resource, struct rlimit * rlim),
-+ TP_ARGS(resource, rlim),
-+ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
-+ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getrusage
-+SC_TRACE_EVENT(sys_getrusage,
-+ TP_PROTO(int who, struct rusage * ru),
-+ TP_ARGS(who, ru),
-+ TP_STRUCT__entry(__field(int, who) __field_hex(struct rusage *, ru)),
-+ TP_fast_assign(tp_assign(who, who) tp_assign(ru, ru)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_gettimeofday
-+SC_TRACE_EVENT(sys_gettimeofday,
-+ TP_PROTO(struct timeval * tv, struct timezone * tz),
-+ TP_ARGS(tv, tz),
-+ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
-+ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_settimeofday
-+SC_TRACE_EVENT(sys_settimeofday,
-+ TP_PROTO(struct timeval * tv, struct timezone * tz),
-+ TP_ARGS(tv, tz),
-+ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
-+ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getgroups16
-+SC_TRACE_EVENT(sys_getgroups16,
-+ TP_PROTO(int gidsetsize, old_gid_t * grouplist),
-+ TP_ARGS(gidsetsize, grouplist),
-+ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(old_gid_t *, grouplist)),
-+ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setgroups16
-+SC_TRACE_EVENT(sys_setgroups16,
-+ TP_PROTO(int gidsetsize, old_gid_t * grouplist),
-+ TP_ARGS(gidsetsize, grouplist),
-+ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(old_gid_t *, grouplist)),
-+ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_symlink
-+SC_TRACE_EVENT(sys_symlink,
-+ TP_PROTO(const char * oldname, const char * newname),
-+ TP_ARGS(oldname, newname),
-+ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
-+ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_lstat
-+SC_TRACE_EVENT(sys_lstat,
-+ TP_PROTO(const char * filename, struct __old_kernel_stat * statbuf),
-+ TP_ARGS(filename, statbuf),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct __old_kernel_stat *, statbuf)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_swapon
-+SC_TRACE_EVENT(sys_swapon,
-+ TP_PROTO(const char * specialfile, int swap_flags),
-+ TP_ARGS(specialfile, swap_flags),
-+ TP_STRUCT__entry(__string_from_user(specialfile, specialfile) __field(int, swap_flags)),
-+ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile) tp_assign(swap_flags, swap_flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_truncate
-+SC_TRACE_EVENT(sys_truncate,
-+ TP_PROTO(const char * path, long length),
-+ TP_ARGS(path, length),
-+ TP_STRUCT__entry(__string_from_user(path, path) __field(long, length)),
-+ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(length, length)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_statfs
-+SC_TRACE_EVENT(sys_statfs,
-+ TP_PROTO(const char * pathname, struct statfs * buf),
-+ TP_ARGS(pathname, buf),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(struct statfs *, buf)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fstatfs
-+SC_TRACE_EVENT(sys_fstatfs,
-+ TP_PROTO(unsigned int fd, struct statfs * buf),
-+ TP_ARGS(fd, buf),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct statfs *, buf)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_socketcall
-+SC_TRACE_EVENT(sys_socketcall,
-+ TP_PROTO(int call, unsigned long * args),
-+ TP_ARGS(call, args),
-+ TP_STRUCT__entry(__field(int, call) __field_hex(unsigned long *, args)),
-+ TP_fast_assign(tp_assign(call, call) tp_assign(args, args)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getitimer
-+SC_TRACE_EVENT(sys_getitimer,
-+ TP_PROTO(int which, struct itimerval * value),
-+ TP_ARGS(which, value),
-+ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(value, value)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_newstat
-+SC_TRACE_EVENT(sys_newstat,
-+ TP_PROTO(const char * filename, struct stat * statbuf),
-+ TP_ARGS(filename, statbuf),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_newlstat
-+SC_TRACE_EVENT(sys_newlstat,
-+ TP_PROTO(const char * filename, struct stat * statbuf),
-+ TP_ARGS(filename, statbuf),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_newfstat
-+SC_TRACE_EVENT(sys_newfstat,
-+ TP_PROTO(unsigned int fd, struct stat * statbuf),
-+ TP_ARGS(fd, statbuf),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct stat *, statbuf)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setdomainname
-+SC_TRACE_EVENT(sys_setdomainname,
-+ TP_PROTO(char * name, int len),
-+ TP_ARGS(name, len),
-+ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
-+ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_delete_module
-+SC_TRACE_EVENT(sys_delete_module,
-+ TP_PROTO(const char * name_user, unsigned int flags),
-+ TP_ARGS(name_user, flags),
-+ TP_STRUCT__entry(__string_from_user(name_user, name_user) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_copy_string_from_user(name_user, name_user) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sched_setparam
-+SC_TRACE_EVENT(sys_sched_setparam,
-+ TP_PROTO(pid_t pid, struct sched_param * param),
-+ TP_ARGS(pid, param),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sched_getparam
-+SC_TRACE_EVENT(sys_sched_getparam,
-+ TP_PROTO(pid_t pid, struct sched_param * param),
-+ TP_ARGS(pid, param),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sched_rr_get_interval
-+SC_TRACE_EVENT(sys_sched_rr_get_interval,
-+ TP_PROTO(pid_t pid, struct timespec * interval),
-+ TP_ARGS(pid, interval),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct timespec *, interval)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_nanosleep
-+SC_TRACE_EVENT(sys_nanosleep,
-+ TP_PROTO(struct timespec * rqtp, struct timespec * rmtp),
-+ TP_ARGS(rqtp, rmtp),
-+ TP_STRUCT__entry(__field_hex(struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
-+ TP_fast_assign(tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_rt_sigpending
-+SC_TRACE_EVENT(sys_rt_sigpending,
-+ TP_PROTO(sigset_t * set, size_t sigsetsize),
-+ TP_ARGS(set, sigsetsize),
-+ TP_STRUCT__entry(__field_hex(sigset_t *, set) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(set, set) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_rt_sigsuspend
-+SC_TRACE_EVENT(sys_rt_sigsuspend,
-+ TP_PROTO(sigset_t * unewset, size_t sigsetsize),
-+ TP_ARGS(unewset, sigsetsize),
-+ TP_STRUCT__entry(__field_hex(sigset_t *, unewset) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(unewset, unewset) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getcwd
-+SC_TRACE_EVENT(sys_getcwd,
-+ TP_PROTO(char * buf, unsigned long size),
-+ TP_ARGS(buf, size),
-+ TP_STRUCT__entry(__field_hex(char *, buf) __field(unsigned long, size)),
-+ TP_fast_assign(tp_assign(buf, buf) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getrlimit
-+SC_TRACE_EVENT(sys_getrlimit,
-+ TP_PROTO(unsigned int resource, struct rlimit * rlim),
-+ TP_ARGS(resource, rlim),
-+ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
-+ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_stat64
-+SC_TRACE_EVENT(sys_stat64,
-+ TP_PROTO(const char * filename, struct stat64 * statbuf),
-+ TP_ARGS(filename, statbuf),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_lstat64
-+SC_TRACE_EVENT(sys_lstat64,
-+ TP_PROTO(const char * filename, struct stat64 * statbuf),
-+ TP_ARGS(filename, statbuf),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fstat64
-+SC_TRACE_EVENT(sys_fstat64,
-+ TP_PROTO(unsigned long fd, struct stat64 * statbuf),
-+ TP_ARGS(fd, statbuf),
-+ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(struct stat64 *, statbuf)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getgroups
-+SC_TRACE_EVENT(sys_getgroups,
-+ TP_PROTO(int gidsetsize, gid_t * grouplist),
-+ TP_ARGS(gidsetsize, grouplist),
-+ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
-+ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setgroups
-+SC_TRACE_EVENT(sys_setgroups,
-+ TP_PROTO(int gidsetsize, gid_t * grouplist),
-+ TP_ARGS(gidsetsize, grouplist),
-+ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
-+ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_pivot_root
-+SC_TRACE_EVENT(sys_pivot_root,
-+ TP_PROTO(const char * new_root, const char * put_old),
-+ TP_ARGS(new_root, put_old),
-+ TP_STRUCT__entry(__string_from_user(new_root, new_root) __string_from_user(put_old, put_old)),
-+ TP_fast_assign(tp_copy_string_from_user(new_root, new_root) tp_copy_string_from_user(put_old, put_old)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_removexattr
-+SC_TRACE_EVENT(sys_removexattr,
-+ TP_PROTO(const char * pathname, const char * name),
-+ TP_ARGS(pathname, name),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_lremovexattr
-+SC_TRACE_EVENT(sys_lremovexattr,
-+ TP_PROTO(const char * pathname, const char * name),
-+ TP_ARGS(pathname, name),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fremovexattr
-+SC_TRACE_EVENT(sys_fremovexattr,
-+ TP_PROTO(int fd, const char * name),
-+ TP_ARGS(fd, name),
-+ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_io_setup
-+SC_TRACE_EVENT(sys_io_setup,
-+ TP_PROTO(unsigned nr_events, aio_context_t * ctxp),
-+ TP_ARGS(nr_events, ctxp),
-+ TP_STRUCT__entry(__field(unsigned, nr_events) __field_hex(aio_context_t *, ctxp)),
-+ TP_fast_assign(tp_assign(nr_events, nr_events) tp_assign(ctxp, ctxp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_timer_gettime
-+SC_TRACE_EVENT(sys_timer_gettime,
-+ TP_PROTO(timer_t timer_id, struct itimerspec * setting),
-+ TP_ARGS(timer_id, setting),
-+ TP_STRUCT__entry(__field(timer_t, timer_id) __field_hex(struct itimerspec *, setting)),
-+ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(setting, setting)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_clock_settime
-+SC_TRACE_EVENT(sys_clock_settime,
-+ TP_PROTO(const clockid_t which_clock, const struct timespec * tp),
-+ TP_ARGS(which_clock, tp),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(const struct timespec *, tp)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_clock_gettime
-+SC_TRACE_EVENT(sys_clock_gettime,
-+ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
-+ TP_ARGS(which_clock, tp),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_clock_getres
-+SC_TRACE_EVENT(sys_clock_getres,
-+ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
-+ TP_ARGS(which_clock, tp),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_utimes
-+SC_TRACE_EVENT(sys_utimes,
-+ TP_PROTO(char * filename, struct timeval * utimes),
-+ TP_ARGS(filename, utimes),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mq_notify
-+SC_TRACE_EVENT(sys_mq_notify,
-+ TP_PROTO(mqd_t mqdes, const struct sigevent * u_notification),
-+ TP_ARGS(mqdes, u_notification),
-+ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct sigevent *, u_notification)),
-+ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_notification, u_notification)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_set_robust_list
-+SC_TRACE_EVENT(sys_set_robust_list,
-+ TP_PROTO(struct robust_list_head * head, size_t len),
-+ TP_ARGS(head, len),
-+ TP_STRUCT__entry(__field_hex(struct robust_list_head *, head) __field(size_t, len)),
-+ TP_fast_assign(tp_assign(head, head) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_timerfd_gettime
-+SC_TRACE_EVENT(sys_timerfd_gettime,
-+ TP_PROTO(int ufd, struct itimerspec * otmr),
-+ TP_ARGS(ufd, otmr),
-+ TP_STRUCT__entry(__field(int, ufd) __field_hex(struct itimerspec *, otmr)),
-+ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(otmr, otmr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_pipe2
-+SC_TRACE_EVENT(sys_pipe2,
-+ TP_PROTO(int * fildes, int flags),
-+ TP_ARGS(fildes, flags),
-+ TP_STRUCT__entry(__field_hex(int *, fildes) __field(int, flags)),
-+ TP_fast_assign(tp_assign(fildes, fildes) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_clock_adjtime
-+SC_TRACE_EVENT(sys_clock_adjtime,
-+ TP_PROTO(const clockid_t which_clock, struct timex * utx),
-+ TP_ARGS(which_clock, utx),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timex *, utx)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(utx, utx)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_read
-+SC_TRACE_EVENT(sys_read,
-+ TP_PROTO(unsigned int fd, char * buf, size_t count),
-+ TP_ARGS(fd, buf, count),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_write
-+SC_TRACE_EVENT(sys_write,
-+ TP_PROTO(unsigned int fd, const char * buf, size_t count),
-+ TP_ARGS(fd, buf, count),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_open
-+SC_TRACE_EVENT(sys_open,
-+ TP_PROTO(const char * filename, int flags, int mode),
-+ TP_ARGS(filename, flags, mode),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_waitpid
-+SC_TRACE_EVENT(sys_waitpid,
-+ TP_PROTO(pid_t pid, int * stat_addr, int options),
-+ TP_ARGS(pid, stat_addr, options),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(int *, stat_addr) __field(int, options)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(stat_addr, stat_addr) tp_assign(options, options)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mknod
-+SC_TRACE_EVENT(sys_mknod,
-+ TP_PROTO(const char * filename, int mode, unsigned dev),
-+ TP_ARGS(filename, mode, dev),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_lchown16
-+SC_TRACE_EVENT(sys_lchown16,
-+ TP_PROTO(const char * filename, old_uid_t user, old_gid_t group),
-+ TP_ARGS(filename, user, group),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(old_uid_t, user) __field(old_gid_t, group)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_readlink
-+SC_TRACE_EVENT(sys_readlink,
-+ TP_PROTO(const char * path, char * buf, int bufsiz),
-+ TP_ARGS(path, buf, bufsiz),
-+ TP_STRUCT__entry(__string_from_user(path, path) __field_hex(char *, buf) __field(int, bufsiz)),
-+ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_old_readdir
-+SC_TRACE_EVENT(sys_old_readdir,
-+ TP_PROTO(unsigned int fd, struct old_linux_dirent * dirent, unsigned int count),
-+ TP_ARGS(fd, dirent, count),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct old_linux_dirent *, dirent) __field(unsigned int, count)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_syslog
-+SC_TRACE_EVENT(sys_syslog,
-+ TP_PROTO(int type, char * buf, int len),
-+ TP_ARGS(type, buf, len),
-+ TP_STRUCT__entry(__field(int, type) __field_hex(char *, buf) __field(int, len)),
-+ TP_fast_assign(tp_assign(type, type) tp_assign(buf, buf) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setitimer
-+SC_TRACE_EVENT(sys_setitimer,
-+ TP_PROTO(int which, struct itimerval * value, struct itimerval * ovalue),
-+ TP_ARGS(which, value, ovalue),
-+ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value) __field_hex(struct itimerval *, ovalue)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(value, value) tp_assign(ovalue, ovalue)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sigprocmask
-+SC_TRACE_EVENT(sys_sigprocmask,
-+ TP_PROTO(int how, old_sigset_t * nset, old_sigset_t * oset),
-+ TP_ARGS(how, nset, oset),
-+ TP_STRUCT__entry(__field(int, how) __field_hex(old_sigset_t *, nset) __field_hex(old_sigset_t *, oset)),
-+ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_init_module
-+SC_TRACE_EVENT(sys_init_module,
-+ TP_PROTO(void * umod, unsigned long len, const char * uargs),
-+ TP_ARGS(umod, len, uargs),
-+ TP_STRUCT__entry(__field_hex(void *, umod) __field(unsigned long, len) __field_hex(const char *, uargs)),
-+ TP_fast_assign(tp_assign(umod, umod) tp_assign(len, len) tp_assign(uargs, uargs)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getdents
-+SC_TRACE_EVENT(sys_getdents,
-+ TP_PROTO(unsigned int fd, struct linux_dirent * dirent, unsigned int count),
-+ TP_ARGS(fd, dirent, count),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent *, dirent) __field(unsigned int, count)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_readv
-+SC_TRACE_EVENT(sys_readv,
-+ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
-+ TP_ARGS(fd, vec, vlen),
-+ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_writev
-+SC_TRACE_EVENT(sys_writev,
-+ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
-+ TP_ARGS(fd, vec, vlen),
-+ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sched_setscheduler
-+SC_TRACE_EVENT(sys_sched_setscheduler,
-+ TP_PROTO(pid_t pid, int policy, struct sched_param * param),
-+ TP_ARGS(pid, policy, param),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(int, policy) __field_hex(struct sched_param *, param)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(policy, policy) tp_assign(param, param)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getresuid16
-+SC_TRACE_EVENT(sys_getresuid16,
-+ TP_PROTO(old_uid_t * ruid, old_uid_t * euid, old_uid_t * suid),
-+ TP_ARGS(ruid, euid, suid),
-+ TP_STRUCT__entry(__field_hex(old_uid_t *, ruid) __field_hex(old_uid_t *, euid) __field_hex(old_uid_t *, suid)),
-+ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_poll
-+SC_TRACE_EVENT(sys_poll,
-+ TP_PROTO(struct pollfd * ufds, unsigned int nfds, long timeout_msecs),
-+ TP_ARGS(ufds, nfds, timeout_msecs),
-+ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field(long, timeout_msecs)),
-+ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(timeout_msecs, timeout_msecs)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getresgid16
-+SC_TRACE_EVENT(sys_getresgid16,
-+ TP_PROTO(old_gid_t * rgid, old_gid_t * egid, old_gid_t * sgid),
-+ TP_ARGS(rgid, egid, sgid),
-+ TP_STRUCT__entry(__field_hex(old_gid_t *, rgid) __field_hex(old_gid_t *, egid) __field_hex(old_gid_t *, sgid)),
-+ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_rt_sigqueueinfo
-+SC_TRACE_EVENT(sys_rt_sigqueueinfo,
-+ TP_PROTO(pid_t pid, int sig, siginfo_t * uinfo),
-+ TP_ARGS(pid, sig, uinfo),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_chown16
-+SC_TRACE_EVENT(sys_chown16,
-+ TP_PROTO(const char * filename, old_uid_t user, old_gid_t group),
-+ TP_ARGS(filename, user, group),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(old_uid_t, user) __field(old_gid_t, group)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_lchown
-+SC_TRACE_EVENT(sys_lchown,
-+ TP_PROTO(const char * filename, uid_t user, gid_t group),
-+ TP_ARGS(filename, user, group),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getresuid
-+SC_TRACE_EVENT(sys_getresuid,
-+ TP_PROTO(uid_t * ruid, uid_t * euid, uid_t * suid),
-+ TP_ARGS(ruid, euid, suid),
-+ TP_STRUCT__entry(__field_hex(uid_t *, ruid) __field_hex(uid_t *, euid) __field_hex(uid_t *, suid)),
-+ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getresgid
-+SC_TRACE_EVENT(sys_getresgid,
-+ TP_PROTO(gid_t * rgid, gid_t * egid, gid_t * sgid),
-+ TP_ARGS(rgid, egid, sgid),
-+ TP_STRUCT__entry(__field_hex(gid_t *, rgid) __field_hex(gid_t *, egid) __field_hex(gid_t *, sgid)),
-+ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_chown
-+SC_TRACE_EVENT(sys_chown,
-+ TP_PROTO(const char * filename, uid_t user, gid_t group),
-+ TP_ARGS(filename, user, group),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mincore
-+SC_TRACE_EVENT(sys_mincore,
-+ TP_PROTO(unsigned long start, size_t len, unsigned char * vec),
-+ TP_ARGS(start, len, vec),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field_hex(unsigned char *, vec)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(vec, vec)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getdents64
-+SC_TRACE_EVENT(sys_getdents64,
-+ TP_PROTO(unsigned int fd, struct linux_dirent64 * dirent, unsigned int count),
-+ TP_ARGS(fd, dirent, count),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent64 *, dirent) __field(unsigned int, count)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_listxattr
-+SC_TRACE_EVENT(sys_listxattr,
-+ TP_PROTO(const char * pathname, char * list, size_t size),
-+ TP_ARGS(pathname, list, size),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_llistxattr
-+SC_TRACE_EVENT(sys_llistxattr,
-+ TP_PROTO(const char * pathname, char * list, size_t size),
-+ TP_ARGS(pathname, list, size),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_flistxattr
-+SC_TRACE_EVENT(sys_flistxattr,
-+ TP_PROTO(int fd, char * list, size_t size),
-+ TP_ARGS(fd, list, size),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(char *, list) __field(size_t, size)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(list, list) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sched_setaffinity
-+SC_TRACE_EVENT(sys_sched_setaffinity,
-+ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
-+ TP_ARGS(pid, len, user_mask_ptr),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sched_getaffinity
-+SC_TRACE_EVENT(sys_sched_getaffinity,
-+ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
-+ TP_ARGS(pid, len, user_mask_ptr),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_io_submit
-+SC_TRACE_EVENT(sys_io_submit,
-+ TP_PROTO(aio_context_t ctx_id, long nr, struct iocb * * iocbpp),
-+ TP_ARGS(ctx_id, nr, iocbpp),
-+ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, nr) __field_hex(struct iocb * *, iocbpp)),
-+ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(nr, nr) tp_assign(iocbpp, iocbpp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_io_cancel
-+SC_TRACE_EVENT(sys_io_cancel,
-+ TP_PROTO(aio_context_t ctx_id, struct iocb * iocb, struct io_event * result),
-+ TP_ARGS(ctx_id, iocb, result),
-+ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field_hex(struct iocb *, iocb) __field_hex(struct io_event *, result)),
-+ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(iocb, iocb) tp_assign(result, result)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_timer_create
-+SC_TRACE_EVENT(sys_timer_create,
-+ TP_PROTO(const clockid_t which_clock, struct sigevent * timer_event_spec, timer_t * created_timer_id),
-+ TP_ARGS(which_clock, timer_event_spec, created_timer_id),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct sigevent *, timer_event_spec) __field_hex(timer_t *, created_timer_id)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(timer_event_spec, timer_event_spec) tp_assign(created_timer_id, created_timer_id)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_statfs64
-+SC_TRACE_EVENT(sys_statfs64,
-+ TP_PROTO(const char * pathname, size_t sz, struct statfs64 * buf),
-+ TP_ARGS(pathname, sz, buf),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(size_t, sz) __field_hex(struct statfs64 *, buf)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(sz, sz) tp_assign(buf, buf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fstatfs64
-+SC_TRACE_EVENT(sys_fstatfs64,
-+ TP_PROTO(unsigned int fd, size_t sz, struct statfs64 * buf),
-+ TP_ARGS(fd, sz, buf),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(size_t, sz) __field_hex(struct statfs64 *, buf)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(sz, sz) tp_assign(buf, buf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mq_getsetattr
-+SC_TRACE_EVENT(sys_mq_getsetattr,
-+ TP_PROTO(mqd_t mqdes, const struct mq_attr * u_mqstat, struct mq_attr * u_omqstat),
-+ TP_ARGS(mqdes, u_mqstat, u_omqstat),
-+ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct mq_attr *, u_mqstat) __field_hex(struct mq_attr *, u_omqstat)),
-+ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_mqstat, u_mqstat) tp_assign(u_omqstat, u_omqstat)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_inotify_add_watch
-+SC_TRACE_EVENT(sys_inotify_add_watch,
-+ TP_PROTO(int fd, const char * pathname, u32 mask),
-+ TP_ARGS(fd, pathname, mask),
-+ TP_STRUCT__entry(__field(int, fd) __string_from_user(pathname, pathname) __field(u32, mask)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(pathname, pathname) tp_assign(mask, mask)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mkdirat
-+SC_TRACE_EVENT(sys_mkdirat,
-+ TP_PROTO(int dfd, const char * pathname, int mode),
-+ TP_ARGS(dfd, pathname, mode),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, mode)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_futimesat
-+SC_TRACE_EVENT(sys_futimesat,
-+ TP_PROTO(int dfd, const char * filename, struct timeval * utimes),
-+ TP_ARGS(dfd, filename, utimes),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_unlinkat
-+SC_TRACE_EVENT(sys_unlinkat,
-+ TP_PROTO(int dfd, const char * pathname, int flag),
-+ TP_ARGS(dfd, pathname, flag),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, flag)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(flag, flag)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_symlinkat
-+SC_TRACE_EVENT(sys_symlinkat,
-+ TP_PROTO(const char * oldname, int newdfd, const char * newname),
-+ TP_ARGS(oldname, newdfd, newname),
-+ TP_STRUCT__entry(__string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
-+ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fchmodat
-+SC_TRACE_EVENT(sys_fchmodat,
-+ TP_PROTO(int dfd, const char * filename, mode_t mode),
-+ TP_ARGS(dfd, filename, mode),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(mode_t, mode)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_faccessat
-+SC_TRACE_EVENT(sys_faccessat,
-+ TP_PROTO(int dfd, const char * filename, int mode),
-+ TP_ARGS(dfd, filename, mode),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_get_robust_list
-+SC_TRACE_EVENT(sys_get_robust_list,
-+ TP_PROTO(int pid, struct robust_list_head * * head_ptr, size_t * len_ptr),
-+ TP_ARGS(pid, head_ptr, len_ptr),
-+ TP_STRUCT__entry(__field(int, pid) __field_hex(struct robust_list_head * *, head_ptr) __field_hex(size_t *, len_ptr)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(head_ptr, head_ptr) tp_assign(len_ptr, len_ptr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getcpu
-+SC_TRACE_EVENT(sys_getcpu,
-+ TP_PROTO(unsigned * cpup, unsigned * nodep, struct getcpu_cache * unused),
-+ TP_ARGS(cpup, nodep, unused),
-+ TP_STRUCT__entry(__field_hex(unsigned *, cpup) __field_hex(unsigned *, nodep) __field_hex(struct getcpu_cache *, unused)),
-+ TP_fast_assign(tp_assign(cpup, cpup) tp_assign(nodep, nodep) tp_assign(unused, unused)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_signalfd
-+SC_TRACE_EVENT(sys_signalfd,
-+ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask),
-+ TP_ARGS(ufd, user_mask, sizemask),
-+ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask)),
-+ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_reboot
-+SC_TRACE_EVENT(sys_reboot,
-+ TP_PROTO(int magic1, int magic2, unsigned int cmd, void * arg),
-+ TP_ARGS(magic1, magic2, cmd, arg),
-+ TP_STRUCT__entry(__field(int, magic1) __field(int, magic2) __field(unsigned int, cmd) __field_hex(void *, arg)),
-+ TP_fast_assign(tp_assign(magic1, magic1) tp_assign(magic2, magic2) tp_assign(cmd, cmd) tp_assign(arg, arg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_wait4
-+SC_TRACE_EVENT(sys_wait4,
-+ TP_PROTO(pid_t upid, int * stat_addr, int options, struct rusage * ru),
-+ TP_ARGS(upid, stat_addr, options, ru),
-+ TP_STRUCT__entry(__field(pid_t, upid) __field_hex(int *, stat_addr) __field(int, options) __field_hex(struct rusage *, ru)),
-+ TP_fast_assign(tp_assign(upid, upid) tp_assign(stat_addr, stat_addr) tp_assign(options, options) tp_assign(ru, ru)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_quotactl
-+SC_TRACE_EVENT(sys_quotactl,
-+ TP_PROTO(unsigned int cmd, const char * special, qid_t id, void * addr),
-+ TP_ARGS(cmd, special, id, addr),
-+ TP_STRUCT__entry(__field(unsigned int, cmd) __field_hex(const char *, special) __field(qid_t, id) __field_hex(void *, addr)),
-+ TP_fast_assign(tp_assign(cmd, cmd) tp_assign(special, special) tp_assign(id, id) tp_assign(addr, addr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_rt_sigaction
-+SC_TRACE_EVENT(sys_rt_sigaction,
-+ TP_PROTO(int sig, const struct sigaction * act, struct sigaction * oact, size_t sigsetsize),
-+ TP_ARGS(sig, act, oact, sigsetsize),
-+ TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction *, act) __field_hex(struct sigaction *, oact) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_rt_sigprocmask
-+SC_TRACE_EVENT(sys_rt_sigprocmask,
-+ TP_PROTO(int how, sigset_t * nset, sigset_t * oset, size_t sigsetsize),
-+ TP_ARGS(how, nset, oset, sigsetsize),
-+ TP_STRUCT__entry(__field(int, how) __field_hex(sigset_t *, nset) __field_hex(sigset_t *, oset) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_rt_sigtimedwait
-+SC_TRACE_EVENT(sys_rt_sigtimedwait,
-+ TP_PROTO(const sigset_t * uthese, siginfo_t * uinfo, const struct timespec * uts, size_t sigsetsize),
-+ TP_ARGS(uthese, uinfo, uts, sigsetsize),
-+ TP_STRUCT__entry(__field_hex(const sigset_t *, uthese) __field_hex(siginfo_t *, uinfo) __field_hex(const struct timespec *, uts) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(uthese, uthese) tp_assign(uinfo, uinfo) tp_assign(uts, uts) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sendfile
-+SC_TRACE_EVENT(sys_sendfile,
-+ TP_PROTO(int out_fd, int in_fd, off_t * offset, size_t count),
-+ TP_ARGS(out_fd, in_fd, offset, count),
-+ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(off_t *, offset) __field(size_t, count)),
-+ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_getxattr
-+SC_TRACE_EVENT(sys_getxattr,
-+ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
-+ TP_ARGS(pathname, name, value, size),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_lgetxattr
-+SC_TRACE_EVENT(sys_lgetxattr,
-+ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
-+ TP_ARGS(pathname, name, value, size),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fgetxattr
-+SC_TRACE_EVENT(sys_fgetxattr,
-+ TP_PROTO(int fd, const char * name, void * value, size_t size),
-+ TP_ARGS(fd, name, value, size),
-+ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sendfile64
-+SC_TRACE_EVENT(sys_sendfile64,
-+ TP_PROTO(int out_fd, int in_fd, loff_t * offset, size_t count),
-+ TP_ARGS(out_fd, in_fd, offset, count),
-+ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(loff_t *, offset) __field(size_t, count)),
-+ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_epoll_ctl
-+SC_TRACE_EVENT(sys_epoll_ctl,
-+ TP_PROTO(int epfd, int op, int fd, struct epoll_event * event),
-+ TP_ARGS(epfd, op, fd, event),
-+ TP_STRUCT__entry(__field(int, epfd) __field(int, op) __field(int, fd) __field_hex(struct epoll_event *, event)),
-+ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(op, op) tp_assign(fd, fd) tp_assign(event, event)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_epoll_wait
-+SC_TRACE_EVENT(sys_epoll_wait,
-+ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout),
-+ TP_ARGS(epfd, events, maxevents, timeout),
-+ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout)),
-+ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_timer_settime
-+SC_TRACE_EVENT(sys_timer_settime,
-+ TP_PROTO(timer_t timer_id, int flags, const struct itimerspec * new_setting, struct itimerspec * old_setting),
-+ TP_ARGS(timer_id, flags, new_setting, old_setting),
-+ TP_STRUCT__entry(__field(timer_t, timer_id) __field(int, flags) __field_hex(const struct itimerspec *, new_setting) __field_hex(struct itimerspec *, old_setting)),
-+ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(flags, flags) tp_assign(new_setting, new_setting) tp_assign(old_setting, old_setting)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_clock_nanosleep
-+SC_TRACE_EVENT(sys_clock_nanosleep,
-+ TP_PROTO(const clockid_t which_clock, int flags, const struct timespec * rqtp, struct timespec * rmtp),
-+ TP_ARGS(which_clock, flags, rqtp, rmtp),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field(int, flags) __field_hex(const struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(flags, flags) tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mq_open
-+SC_TRACE_EVENT(sys_mq_open,
-+ TP_PROTO(const char * u_name, int oflag, mode_t mode, struct mq_attr * u_attr),
-+ TP_ARGS(u_name, oflag, mode, u_attr),
-+ TP_STRUCT__entry(__string_from_user(u_name, u_name) __field(int, oflag) __field(mode_t, mode) __field_hex(struct mq_attr *, u_attr)),
-+ TP_fast_assign(tp_copy_string_from_user(u_name, u_name) tp_assign(oflag, oflag) tp_assign(mode, mode) tp_assign(u_attr, u_attr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_kexec_load
-+SC_TRACE_EVENT(sys_kexec_load,
-+ TP_PROTO(unsigned long entry, unsigned long nr_segments, struct kexec_segment * segments, unsigned long flags),
-+ TP_ARGS(entry, nr_segments, segments, flags),
-+ TP_STRUCT__entry(__field(unsigned long, entry) __field(unsigned long, nr_segments) __field_hex(struct kexec_segment *, segments) __field(unsigned long, flags)),
-+ TP_fast_assign(tp_assign(entry, entry) tp_assign(nr_segments, nr_segments) tp_assign(segments, segments) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_request_key
-+SC_TRACE_EVENT(sys_request_key,
-+ TP_PROTO(const char * _type, const char * _description, const char * _callout_info, key_serial_t destringid),
-+ TP_ARGS(_type, _description, _callout_info, destringid),
-+ TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const char *, _callout_info) __field(key_serial_t, destringid)),
-+ TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_callout_info, _callout_info) tp_assign(destringid, destringid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_openat
-+SC_TRACE_EVENT(sys_openat,
-+ TP_PROTO(int dfd, const char * filename, int flags, int mode),
-+ TP_ARGS(dfd, filename, flags, mode),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mknodat
-+SC_TRACE_EVENT(sys_mknodat,
-+ TP_PROTO(int dfd, const char * filename, int mode, unsigned dev),
-+ TP_ARGS(dfd, filename, mode, dev),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fstatat64
-+SC_TRACE_EVENT(sys_fstatat64,
-+ TP_PROTO(int dfd, const char * filename, struct stat64 * statbuf, int flag),
-+ TP_ARGS(dfd, filename, statbuf, flag),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf) __field(int, flag)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf) tp_assign(flag, flag)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_renameat
-+SC_TRACE_EVENT(sys_renameat,
-+ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname),
-+ TP_ARGS(olddfd, oldname, newdfd, newname),
-+ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
-+ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_readlinkat
-+SC_TRACE_EVENT(sys_readlinkat,
-+ TP_PROTO(int dfd, const char * pathname, char * buf, int bufsiz),
-+ TP_ARGS(dfd, pathname, buf, bufsiz),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field_hex(char *, buf) __field(int, bufsiz)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_vmsplice
-+SC_TRACE_EVENT(sys_vmsplice,
-+ TP_PROTO(int fd, const struct iovec * iov, unsigned long nr_segs, unsigned int flags),
-+ TP_ARGS(fd, iov, nr_segs, flags),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(const struct iovec *, iov) __field(unsigned long, nr_segs) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(iov, iov) tp_assign(nr_segs, nr_segs) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_utimensat
-+SC_TRACE_EVENT(sys_utimensat,
-+ TP_PROTO(int dfd, const char * filename, struct timespec * utimes, int flags),
-+ TP_ARGS(dfd, filename, utimes, flags),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timespec *, utimes) __field(int, flags)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_timerfd_settime
-+SC_TRACE_EVENT(sys_timerfd_settime,
-+ TP_PROTO(int ufd, int flags, const struct itimerspec * utmr, struct itimerspec * otmr),
-+ TP_ARGS(ufd, flags, utmr, otmr),
-+ TP_STRUCT__entry(__field(int, ufd) __field(int, flags) __field_hex(const struct itimerspec *, utmr) __field_hex(struct itimerspec *, otmr)),
-+ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(flags, flags) tp_assign(utmr, utmr) tp_assign(otmr, otmr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_signalfd4
-+SC_TRACE_EVENT(sys_signalfd4,
-+ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask, int flags),
-+ TP_ARGS(ufd, user_mask, sizemask, flags),
-+ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask) __field(int, flags)),
-+ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_rt_tgsigqueueinfo
-+SC_TRACE_EVENT(sys_rt_tgsigqueueinfo,
-+ TP_PROTO(pid_t tgid, pid_t pid, int sig, siginfo_t * uinfo),
-+ TP_ARGS(tgid, pid, sig, uinfo),
-+ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
-+ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_prlimit64
-+SC_TRACE_EVENT(sys_prlimit64,
-+ TP_PROTO(pid_t pid, unsigned int resource, const struct rlimit64 * new_rlim, struct rlimit64 * old_rlim),
-+ TP_ARGS(pid, resource, new_rlim, old_rlim),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, resource) __field_hex(const struct rlimit64 *, new_rlim) __field_hex(struct rlimit64 *, old_rlim)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(resource, resource) tp_assign(new_rlim, new_rlim) tp_assign(old_rlim, old_rlim)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_sendmmsg
-+SC_TRACE_EVENT(sys_sendmmsg,
-+ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags),
-+ TP_ARGS(fd, mmsg, vlen, flags),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mount
-+SC_TRACE_EVENT(sys_mount,
-+ TP_PROTO(char * dev_name, char * dir_name, char * type, unsigned long flags, void * data),
-+ TP_ARGS(dev_name, dir_name, type, flags, data),
-+ TP_STRUCT__entry(__string_from_user(dev_name, dev_name) __string_from_user(dir_name, dir_name) __string_from_user(type, type) __field(unsigned long, flags) __field_hex(void *, data)),
-+ TP_fast_assign(tp_copy_string_from_user(dev_name, dev_name) tp_copy_string_from_user(dir_name, dir_name) tp_copy_string_from_user(type, type) tp_assign(flags, flags) tp_assign(data, data)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_llseek
-+SC_TRACE_EVENT(sys_llseek,
-+ TP_PROTO(unsigned int fd, unsigned long offset_high, unsigned long offset_low, loff_t * result, unsigned int origin),
-+ TP_ARGS(fd, offset_high, offset_low, result, origin),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, offset_high) __field(unsigned long, offset_low) __field_hex(loff_t *, result) __field(unsigned int, origin)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset_high, offset_high) tp_assign(offset_low, offset_low) tp_assign(result, result) tp_assign(origin, origin)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_select
-+SC_TRACE_EVENT(sys_select,
-+ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timeval * tvp),
-+ TP_ARGS(n, inp, outp, exp, tvp),
-+ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timeval *, tvp)),
-+ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tvp, tvp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_setxattr
-+SC_TRACE_EVENT(sys_setxattr,
-+ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
-+ TP_ARGS(pathname, name, value, size, flags),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_lsetxattr
-+SC_TRACE_EVENT(sys_lsetxattr,
-+ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
-+ TP_ARGS(pathname, name, value, size, flags),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fsetxattr
-+SC_TRACE_EVENT(sys_fsetxattr,
-+ TP_PROTO(int fd, const char * name, const void * value, size_t size, int flags),
-+ TP_ARGS(fd, name, value, size, flags),
-+ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_io_getevents
-+SC_TRACE_EVENT(sys_io_getevents,
-+ TP_PROTO(aio_context_t ctx_id, long min_nr, long nr, struct io_event * events, struct timespec * timeout),
-+ TP_ARGS(ctx_id, min_nr, nr, events, timeout),
-+ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, min_nr) __field(long, nr) __field_hex(struct io_event *, events) __field_hex(struct timespec *, timeout)),
-+ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(min_nr, min_nr) tp_assign(nr, nr) tp_assign(events, events) tp_assign(timeout, timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mq_timedsend
-+SC_TRACE_EVENT(sys_mq_timedsend,
-+ TP_PROTO(mqd_t mqdes, const char * u_msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec * u_abs_timeout),
-+ TP_ARGS(mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout),
-+ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const char *, u_msg_ptr) __field(size_t, msg_len) __field(unsigned int, msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
-+ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(msg_prio, msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_mq_timedreceive
-+SC_TRACE_EVENT(sys_mq_timedreceive,
-+ TP_PROTO(mqd_t mqdes, char * u_msg_ptr, size_t msg_len, unsigned int * u_msg_prio, const struct timespec * u_abs_timeout),
-+ TP_ARGS(mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout),
-+ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(char *, u_msg_ptr) __field(size_t, msg_len) __field_hex(unsigned int *, u_msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
-+ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(u_msg_prio, u_msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_waitid
-+SC_TRACE_EVENT(sys_waitid,
-+ TP_PROTO(int which, pid_t upid, struct siginfo * infop, int options, struct rusage * ru),
-+ TP_ARGS(which, upid, infop, options, ru),
-+ TP_STRUCT__entry(__field(int, which) __field(pid_t, upid) __field_hex(struct siginfo *, infop) __field(int, options) __field_hex(struct rusage *, ru)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(upid, upid) tp_assign(infop, infop) tp_assign(options, options) tp_assign(ru, ru)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_add_key
-+SC_TRACE_EVENT(sys_add_key,
-+ TP_PROTO(const char * _type, const char * _description, const void * _payload, size_t plen, key_serial_t ringid),
-+ TP_ARGS(_type, _description, _payload, plen, ringid),
-+ TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const void *, _payload) __field(size_t, plen) __field(key_serial_t, ringid)),
-+ TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_payload, _payload) tp_assign(plen, plen) tp_assign(ringid, ringid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_fchownat
-+SC_TRACE_EVENT(sys_fchownat,
-+ TP_PROTO(int dfd, const char * filename, uid_t user, gid_t group, int flag),
-+ TP_ARGS(dfd, filename, user, group, flag),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group) __field(int, flag)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group) tp_assign(flag, flag)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_linkat
-+SC_TRACE_EVENT(sys_linkat,
-+ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname, int flags),
-+ TP_ARGS(olddfd, oldname, newdfd, newname, flags),
-+ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname) __field(int, flags)),
-+ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_ppoll
-+SC_TRACE_EVENT(sys_ppoll,
-+ TP_PROTO(struct pollfd * ufds, unsigned int nfds, struct timespec * tsp, const sigset_t * sigmask, size_t sigsetsize),
-+ TP_ARGS(ufds, nfds, tsp, sigmask, sigsetsize),
-+ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field_hex(struct timespec *, tsp) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(tsp, tsp) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_preadv
-+SC_TRACE_EVENT(sys_preadv,
-+ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
-+ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
-+ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_pwritev
-+SC_TRACE_EVENT(sys_pwritev,
-+ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
-+ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
-+ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_perf_event_open
-+SC_TRACE_EVENT(sys_perf_event_open,
-+ TP_PROTO(struct perf_event_attr * attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags),
-+ TP_ARGS(attr_uptr, pid, cpu, group_fd, flags),
-+ TP_STRUCT__entry(__field_hex(struct perf_event_attr *, attr_uptr) __field(pid_t, pid) __field(int, cpu) __field(int, group_fd) __field(unsigned long, flags)),
-+ TP_fast_assign(tp_assign(attr_uptr, attr_uptr) tp_assign(pid, pid) tp_assign(cpu, cpu) tp_assign(group_fd, group_fd) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_recvmmsg
-+SC_TRACE_EVENT(sys_recvmmsg,
-+ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags, struct timespec * timeout),
-+ TP_ARGS(fd, mmsg, vlen, flags, timeout),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags) __field_hex(struct timespec *, timeout)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags) tp_assign(timeout, timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_ipc
-+SC_TRACE_EVENT(sys_ipc,
-+ TP_PROTO(unsigned int call, int first, unsigned long second, unsigned long third, void * ptr, long fifth),
-+ TP_ARGS(call, first, second, third, ptr, fifth),
-+ TP_STRUCT__entry(__field(unsigned int, call) __field(int, first) __field(unsigned long, second) __field(unsigned long, third) __field_hex(void *, ptr) __field(long, fifth)),
-+ TP_fast_assign(tp_assign(call, call) tp_assign(first, first) tp_assign(second, second) tp_assign(third, third) tp_assign(ptr, ptr) tp_assign(fifth, fifth)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_futex
-+SC_TRACE_EVENT(sys_futex,
-+ TP_PROTO(u32 * uaddr, int op, u32 val, struct timespec * utime, u32 * uaddr2, u32 val3),
-+ TP_ARGS(uaddr, op, val, utime, uaddr2, val3),
-+ TP_STRUCT__entry(__field_hex(u32 *, uaddr) __field(int, op) __field(u32, val) __field_hex(struct timespec *, utime) __field_hex(u32 *, uaddr2) __field(u32, val3)),
-+ TP_fast_assign(tp_assign(uaddr, uaddr) tp_assign(op, op) tp_assign(val, val) tp_assign(utime, utime) tp_assign(uaddr2, uaddr2) tp_assign(val3, val3)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_pselect6
-+SC_TRACE_EVENT(sys_pselect6,
-+ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timespec * tsp, void * sig),
-+ TP_ARGS(n, inp, outp, exp, tsp, sig),
-+ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timespec *, tsp) __field_hex(void *, sig)),
-+ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tsp, tsp) tp_assign(sig, sig)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_splice
-+SC_TRACE_EVENT(sys_splice,
-+ TP_PROTO(int fd_in, loff_t * off_in, int fd_out, loff_t * off_out, size_t len, unsigned int flags),
-+ TP_ARGS(fd_in, off_in, fd_out, off_out, len, flags),
-+ TP_STRUCT__entry(__field(int, fd_in) __field_hex(loff_t *, off_in) __field(int, fd_out) __field_hex(loff_t *, off_out) __field(size_t, len) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_assign(fd_in, fd_in) tp_assign(off_in, off_in) tp_assign(fd_out, fd_out) tp_assign(off_out, off_out) tp_assign(len, len) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_32_sys_epoll_pwait
-+SC_TRACE_EVENT(sys_epoll_pwait,
-+ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout, const sigset_t * sigmask, size_t sigsetsize),
-+ TP_ARGS(epfd, events, maxevents, timeout, sigmask, sigsetsize),
-+ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+
-+#endif /* _TRACE_SYSCALLS_POINTERS_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
-+
-+#else /* CREATE_SYSCALL_TABLE */
-+
-+#include "x86-32-syscalls-3.1.0-rc6_pointers_override.h"
-+#include "syscalls_pointers_override.h"
-+
-+#ifndef OVERRIDE_TABLE_32_sys_read
-+TRACE_SYSCALL_TABLE(sys_read, sys_read, 3, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_write
-+TRACE_SYSCALL_TABLE(sys_write, sys_write, 4, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_open
-+TRACE_SYSCALL_TABLE(sys_open, sys_open, 5, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_waitpid
-+TRACE_SYSCALL_TABLE(sys_waitpid, sys_waitpid, 7, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_creat
-+TRACE_SYSCALL_TABLE(sys_creat, sys_creat, 8, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_link
-+TRACE_SYSCALL_TABLE(sys_link, sys_link, 9, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_unlink
-+TRACE_SYSCALL_TABLE(sys_unlink, sys_unlink, 10, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_chdir
-+TRACE_SYSCALL_TABLE(sys_chdir, sys_chdir, 12, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_time
-+TRACE_SYSCALL_TABLE(sys_time, sys_time, 13, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mknod
-+TRACE_SYSCALL_TABLE(sys_mknod, sys_mknod, 14, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_chmod
-+TRACE_SYSCALL_TABLE(sys_chmod, sys_chmod, 15, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_lchown16
-+TRACE_SYSCALL_TABLE(sys_lchown16, sys_lchown16, 16, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_stat
-+TRACE_SYSCALL_TABLE(sys_stat, sys_stat, 18, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mount
-+TRACE_SYSCALL_TABLE(sys_mount, sys_mount, 21, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_oldumount
-+TRACE_SYSCALL_TABLE(sys_oldumount, sys_oldumount, 22, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_stime
-+TRACE_SYSCALL_TABLE(sys_stime, sys_stime, 25, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fstat
-+TRACE_SYSCALL_TABLE(sys_fstat, sys_fstat, 28, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_utime
-+TRACE_SYSCALL_TABLE(sys_utime, sys_utime, 30, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_access
-+TRACE_SYSCALL_TABLE(sys_access, sys_access, 33, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_rename
-+TRACE_SYSCALL_TABLE(sys_rename, sys_rename, 38, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mkdir
-+TRACE_SYSCALL_TABLE(sys_mkdir, sys_mkdir, 39, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_rmdir
-+TRACE_SYSCALL_TABLE(sys_rmdir, sys_rmdir, 40, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_pipe
-+TRACE_SYSCALL_TABLE(sys_pipe, sys_pipe, 42, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_times
-+TRACE_SYSCALL_TABLE(sys_times, sys_times, 43, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_acct
-+TRACE_SYSCALL_TABLE(sys_acct, sys_acct, 51, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_umount
-+TRACE_SYSCALL_TABLE(sys_umount, sys_umount, 52, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_olduname
-+TRACE_SYSCALL_TABLE(sys_olduname, sys_olduname, 59, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_chroot
-+TRACE_SYSCALL_TABLE(sys_chroot, sys_chroot, 61, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_ustat
-+TRACE_SYSCALL_TABLE(sys_ustat, sys_ustat, 62, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sigpending
-+TRACE_SYSCALL_TABLE(sys_sigpending, sys_sigpending, 73, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sethostname
-+TRACE_SYSCALL_TABLE(sys_sethostname, sys_sethostname, 74, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setrlimit
-+TRACE_SYSCALL_TABLE(sys_setrlimit, sys_setrlimit, 75, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_old_getrlimit
-+TRACE_SYSCALL_TABLE(sys_old_getrlimit, sys_old_getrlimit, 76, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getrusage
-+TRACE_SYSCALL_TABLE(sys_getrusage, sys_getrusage, 77, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_gettimeofday
-+TRACE_SYSCALL_TABLE(sys_gettimeofday, sys_gettimeofday, 78, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_settimeofday
-+TRACE_SYSCALL_TABLE(sys_settimeofday, sys_settimeofday, 79, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getgroups16
-+TRACE_SYSCALL_TABLE(sys_getgroups16, sys_getgroups16, 80, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setgroups16
-+TRACE_SYSCALL_TABLE(sys_setgroups16, sys_setgroups16, 81, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_old_select
-+TRACE_SYSCALL_TABLE(sys_old_select, sys_old_select, 82, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_symlink
-+TRACE_SYSCALL_TABLE(sys_symlink, sys_symlink, 83, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_lstat
-+TRACE_SYSCALL_TABLE(sys_lstat, sys_lstat, 84, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_readlink
-+TRACE_SYSCALL_TABLE(sys_readlink, sys_readlink, 85, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_uselib
-+TRACE_SYSCALL_TABLE(sys_uselib, sys_uselib, 86, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_swapon
-+TRACE_SYSCALL_TABLE(sys_swapon, sys_swapon, 87, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_reboot
-+TRACE_SYSCALL_TABLE(sys_reboot, sys_reboot, 88, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_old_readdir
-+TRACE_SYSCALL_TABLE(sys_old_readdir, sys_old_readdir, 89, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_old_mmap
-+TRACE_SYSCALL_TABLE(sys_old_mmap, sys_old_mmap, 90, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_truncate
-+TRACE_SYSCALL_TABLE(sys_truncate, sys_truncate, 92, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_statfs
-+TRACE_SYSCALL_TABLE(sys_statfs, sys_statfs, 99, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fstatfs
-+TRACE_SYSCALL_TABLE(sys_fstatfs, sys_fstatfs, 100, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_socketcall
-+TRACE_SYSCALL_TABLE(sys_socketcall, sys_socketcall, 102, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_syslog
-+TRACE_SYSCALL_TABLE(sys_syslog, sys_syslog, 103, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setitimer
-+TRACE_SYSCALL_TABLE(sys_setitimer, sys_setitimer, 104, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getitimer
-+TRACE_SYSCALL_TABLE(sys_getitimer, sys_getitimer, 105, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_newstat
-+TRACE_SYSCALL_TABLE(sys_newstat, sys_newstat, 106, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_newlstat
-+TRACE_SYSCALL_TABLE(sys_newlstat, sys_newlstat, 107, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_newfstat
-+TRACE_SYSCALL_TABLE(sys_newfstat, sys_newfstat, 108, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_uname
-+TRACE_SYSCALL_TABLE(sys_uname, sys_uname, 109, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_wait4
-+TRACE_SYSCALL_TABLE(sys_wait4, sys_wait4, 114, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_swapoff
-+TRACE_SYSCALL_TABLE(sys_swapoff, sys_swapoff, 115, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sysinfo
-+TRACE_SYSCALL_TABLE(sys_sysinfo, sys_sysinfo, 116, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_ipc
-+TRACE_SYSCALL_TABLE(sys_ipc, sys_ipc, 117, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setdomainname
-+TRACE_SYSCALL_TABLE(sys_setdomainname, sys_setdomainname, 121, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_newuname
-+TRACE_SYSCALL_TABLE(sys_newuname, sys_newuname, 122, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_adjtimex
-+TRACE_SYSCALL_TABLE(sys_adjtimex, sys_adjtimex, 124, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sigprocmask
-+TRACE_SYSCALL_TABLE(sys_sigprocmask, sys_sigprocmask, 126, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_init_module
-+TRACE_SYSCALL_TABLE(sys_init_module, sys_init_module, 128, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_delete_module
-+TRACE_SYSCALL_TABLE(sys_delete_module, sys_delete_module, 129, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_quotactl
-+TRACE_SYSCALL_TABLE(sys_quotactl, sys_quotactl, 131, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_llseek
-+TRACE_SYSCALL_TABLE(sys_llseek, sys_llseek, 140, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getdents
-+TRACE_SYSCALL_TABLE(sys_getdents, sys_getdents, 141, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_select
-+TRACE_SYSCALL_TABLE(sys_select, sys_select, 142, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_readv
-+TRACE_SYSCALL_TABLE(sys_readv, sys_readv, 145, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_writev
-+TRACE_SYSCALL_TABLE(sys_writev, sys_writev, 146, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sysctl
-+TRACE_SYSCALL_TABLE(sys_sysctl, sys_sysctl, 149, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sched_setparam
-+TRACE_SYSCALL_TABLE(sys_sched_setparam, sys_sched_setparam, 154, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sched_getparam
-+TRACE_SYSCALL_TABLE(sys_sched_getparam, sys_sched_getparam, 155, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sched_setscheduler
-+TRACE_SYSCALL_TABLE(sys_sched_setscheduler, sys_sched_setscheduler, 156, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sched_rr_get_interval
-+TRACE_SYSCALL_TABLE(sys_sched_rr_get_interval, sys_sched_rr_get_interval, 161, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_nanosleep
-+TRACE_SYSCALL_TABLE(sys_nanosleep, sys_nanosleep, 162, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getresuid16
-+TRACE_SYSCALL_TABLE(sys_getresuid16, sys_getresuid16, 165, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_poll
-+TRACE_SYSCALL_TABLE(sys_poll, sys_poll, 168, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getresgid16
-+TRACE_SYSCALL_TABLE(sys_getresgid16, sys_getresgid16, 171, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_rt_sigaction
-+TRACE_SYSCALL_TABLE(sys_rt_sigaction, sys_rt_sigaction, 174, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_rt_sigprocmask
-+TRACE_SYSCALL_TABLE(sys_rt_sigprocmask, sys_rt_sigprocmask, 175, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_rt_sigpending
-+TRACE_SYSCALL_TABLE(sys_rt_sigpending, sys_rt_sigpending, 176, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_rt_sigtimedwait
-+TRACE_SYSCALL_TABLE(sys_rt_sigtimedwait, sys_rt_sigtimedwait, 177, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_rt_sigqueueinfo
-+TRACE_SYSCALL_TABLE(sys_rt_sigqueueinfo, sys_rt_sigqueueinfo, 178, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_rt_sigsuspend
-+TRACE_SYSCALL_TABLE(sys_rt_sigsuspend, sys_rt_sigsuspend, 179, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_chown16
-+TRACE_SYSCALL_TABLE(sys_chown16, sys_chown16, 182, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getcwd
-+TRACE_SYSCALL_TABLE(sys_getcwd, sys_getcwd, 183, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sendfile
-+TRACE_SYSCALL_TABLE(sys_sendfile, sys_sendfile, 187, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getrlimit
-+TRACE_SYSCALL_TABLE(sys_getrlimit, sys_getrlimit, 191, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_stat64
-+TRACE_SYSCALL_TABLE(sys_stat64, sys_stat64, 195, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_lstat64
-+TRACE_SYSCALL_TABLE(sys_lstat64, sys_lstat64, 196, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fstat64
-+TRACE_SYSCALL_TABLE(sys_fstat64, sys_fstat64, 197, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_lchown
-+TRACE_SYSCALL_TABLE(sys_lchown, sys_lchown, 198, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getgroups
-+TRACE_SYSCALL_TABLE(sys_getgroups, sys_getgroups, 205, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setgroups
-+TRACE_SYSCALL_TABLE(sys_setgroups, sys_setgroups, 206, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getresuid
-+TRACE_SYSCALL_TABLE(sys_getresuid, sys_getresuid, 209, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getresgid
-+TRACE_SYSCALL_TABLE(sys_getresgid, sys_getresgid, 211, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_chown
-+TRACE_SYSCALL_TABLE(sys_chown, sys_chown, 212, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_pivot_root
-+TRACE_SYSCALL_TABLE(sys_pivot_root, sys_pivot_root, 217, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mincore
-+TRACE_SYSCALL_TABLE(sys_mincore, sys_mincore, 218, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getdents64
-+TRACE_SYSCALL_TABLE(sys_getdents64, sys_getdents64, 220, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_setxattr
-+TRACE_SYSCALL_TABLE(sys_setxattr, sys_setxattr, 226, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_lsetxattr
-+TRACE_SYSCALL_TABLE(sys_lsetxattr, sys_lsetxattr, 227, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fsetxattr
-+TRACE_SYSCALL_TABLE(sys_fsetxattr, sys_fsetxattr, 228, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getxattr
-+TRACE_SYSCALL_TABLE(sys_getxattr, sys_getxattr, 229, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_lgetxattr
-+TRACE_SYSCALL_TABLE(sys_lgetxattr, sys_lgetxattr, 230, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fgetxattr
-+TRACE_SYSCALL_TABLE(sys_fgetxattr, sys_fgetxattr, 231, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_listxattr
-+TRACE_SYSCALL_TABLE(sys_listxattr, sys_listxattr, 232, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_llistxattr
-+TRACE_SYSCALL_TABLE(sys_llistxattr, sys_llistxattr, 233, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_flistxattr
-+TRACE_SYSCALL_TABLE(sys_flistxattr, sys_flistxattr, 234, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_removexattr
-+TRACE_SYSCALL_TABLE(sys_removexattr, sys_removexattr, 235, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_lremovexattr
-+TRACE_SYSCALL_TABLE(sys_lremovexattr, sys_lremovexattr, 236, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fremovexattr
-+TRACE_SYSCALL_TABLE(sys_fremovexattr, sys_fremovexattr, 237, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sendfile64
-+TRACE_SYSCALL_TABLE(sys_sendfile64, sys_sendfile64, 239, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_futex
-+TRACE_SYSCALL_TABLE(sys_futex, sys_futex, 240, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sched_setaffinity
-+TRACE_SYSCALL_TABLE(sys_sched_setaffinity, sys_sched_setaffinity, 241, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sched_getaffinity
-+TRACE_SYSCALL_TABLE(sys_sched_getaffinity, sys_sched_getaffinity, 242, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_io_setup
-+TRACE_SYSCALL_TABLE(sys_io_setup, sys_io_setup, 245, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_io_getevents
-+TRACE_SYSCALL_TABLE(sys_io_getevents, sys_io_getevents, 247, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_io_submit
-+TRACE_SYSCALL_TABLE(sys_io_submit, sys_io_submit, 248, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_io_cancel
-+TRACE_SYSCALL_TABLE(sys_io_cancel, sys_io_cancel, 249, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_epoll_ctl
-+TRACE_SYSCALL_TABLE(sys_epoll_ctl, sys_epoll_ctl, 255, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_epoll_wait
-+TRACE_SYSCALL_TABLE(sys_epoll_wait, sys_epoll_wait, 256, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_set_tid_address
-+TRACE_SYSCALL_TABLE(sys_set_tid_address, sys_set_tid_address, 258, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_timer_create
-+TRACE_SYSCALL_TABLE(sys_timer_create, sys_timer_create, 259, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_timer_settime
-+TRACE_SYSCALL_TABLE(sys_timer_settime, sys_timer_settime, 260, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_timer_gettime
-+TRACE_SYSCALL_TABLE(sys_timer_gettime, sys_timer_gettime, 261, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_clock_settime
-+TRACE_SYSCALL_TABLE(sys_clock_settime, sys_clock_settime, 264, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_clock_gettime
-+TRACE_SYSCALL_TABLE(sys_clock_gettime, sys_clock_gettime, 265, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_clock_getres
-+TRACE_SYSCALL_TABLE(sys_clock_getres, sys_clock_getres, 266, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_clock_nanosleep
-+TRACE_SYSCALL_TABLE(sys_clock_nanosleep, sys_clock_nanosleep, 267, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_statfs64
-+TRACE_SYSCALL_TABLE(sys_statfs64, sys_statfs64, 268, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fstatfs64
-+TRACE_SYSCALL_TABLE(sys_fstatfs64, sys_fstatfs64, 269, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_utimes
-+TRACE_SYSCALL_TABLE(sys_utimes, sys_utimes, 271, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mq_open
-+TRACE_SYSCALL_TABLE(sys_mq_open, sys_mq_open, 277, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mq_unlink
-+TRACE_SYSCALL_TABLE(sys_mq_unlink, sys_mq_unlink, 278, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mq_timedsend
-+TRACE_SYSCALL_TABLE(sys_mq_timedsend, sys_mq_timedsend, 279, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mq_timedreceive
-+TRACE_SYSCALL_TABLE(sys_mq_timedreceive, sys_mq_timedreceive, 280, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mq_notify
-+TRACE_SYSCALL_TABLE(sys_mq_notify, sys_mq_notify, 281, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mq_getsetattr
-+TRACE_SYSCALL_TABLE(sys_mq_getsetattr, sys_mq_getsetattr, 282, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_kexec_load
-+TRACE_SYSCALL_TABLE(sys_kexec_load, sys_kexec_load, 283, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_waitid
-+TRACE_SYSCALL_TABLE(sys_waitid, sys_waitid, 284, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_add_key
-+TRACE_SYSCALL_TABLE(sys_add_key, sys_add_key, 286, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_request_key
-+TRACE_SYSCALL_TABLE(sys_request_key, sys_request_key, 287, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_inotify_add_watch
-+TRACE_SYSCALL_TABLE(sys_inotify_add_watch, sys_inotify_add_watch, 292, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_openat
-+TRACE_SYSCALL_TABLE(sys_openat, sys_openat, 295, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mkdirat
-+TRACE_SYSCALL_TABLE(sys_mkdirat, sys_mkdirat, 296, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_mknodat
-+TRACE_SYSCALL_TABLE(sys_mknodat, sys_mknodat, 297, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fchownat
-+TRACE_SYSCALL_TABLE(sys_fchownat, sys_fchownat, 298, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_futimesat
-+TRACE_SYSCALL_TABLE(sys_futimesat, sys_futimesat, 299, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fstatat64
-+TRACE_SYSCALL_TABLE(sys_fstatat64, sys_fstatat64, 300, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_unlinkat
-+TRACE_SYSCALL_TABLE(sys_unlinkat, sys_unlinkat, 301, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_renameat
-+TRACE_SYSCALL_TABLE(sys_renameat, sys_renameat, 302, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_linkat
-+TRACE_SYSCALL_TABLE(sys_linkat, sys_linkat, 303, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_symlinkat
-+TRACE_SYSCALL_TABLE(sys_symlinkat, sys_symlinkat, 304, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_readlinkat
-+TRACE_SYSCALL_TABLE(sys_readlinkat, sys_readlinkat, 305, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_fchmodat
-+TRACE_SYSCALL_TABLE(sys_fchmodat, sys_fchmodat, 306, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_faccessat
-+TRACE_SYSCALL_TABLE(sys_faccessat, sys_faccessat, 307, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_pselect6
-+TRACE_SYSCALL_TABLE(sys_pselect6, sys_pselect6, 308, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_ppoll
-+TRACE_SYSCALL_TABLE(sys_ppoll, sys_ppoll, 309, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_set_robust_list
-+TRACE_SYSCALL_TABLE(sys_set_robust_list, sys_set_robust_list, 311, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_get_robust_list
-+TRACE_SYSCALL_TABLE(sys_get_robust_list, sys_get_robust_list, 312, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_splice
-+TRACE_SYSCALL_TABLE(sys_splice, sys_splice, 313, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_vmsplice
-+TRACE_SYSCALL_TABLE(sys_vmsplice, sys_vmsplice, 316, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_getcpu
-+TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 318, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_epoll_pwait
-+TRACE_SYSCALL_TABLE(sys_epoll_pwait, sys_epoll_pwait, 319, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_utimensat
-+TRACE_SYSCALL_TABLE(sys_utimensat, sys_utimensat, 320, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_signalfd
-+TRACE_SYSCALL_TABLE(sys_signalfd, sys_signalfd, 321, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_timerfd_settime
-+TRACE_SYSCALL_TABLE(sys_timerfd_settime, sys_timerfd_settime, 325, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_timerfd_gettime
-+TRACE_SYSCALL_TABLE(sys_timerfd_gettime, sys_timerfd_gettime, 326, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_signalfd4
-+TRACE_SYSCALL_TABLE(sys_signalfd4, sys_signalfd4, 327, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_pipe2
-+TRACE_SYSCALL_TABLE(sys_pipe2, sys_pipe2, 331, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_preadv
-+TRACE_SYSCALL_TABLE(sys_preadv, sys_preadv, 333, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_pwritev
-+TRACE_SYSCALL_TABLE(sys_pwritev, sys_pwritev, 334, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_rt_tgsigqueueinfo
-+TRACE_SYSCALL_TABLE(sys_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, 335, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_perf_event_open
-+TRACE_SYSCALL_TABLE(sys_perf_event_open, sys_perf_event_open, 336, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_recvmmsg
-+TRACE_SYSCALL_TABLE(sys_recvmmsg, sys_recvmmsg, 337, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_prlimit64
-+TRACE_SYSCALL_TABLE(sys_prlimit64, sys_prlimit64, 340, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_clock_adjtime
-+TRACE_SYSCALL_TABLE(sys_clock_adjtime, sys_clock_adjtime, 343, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_32_sys_sendmmsg
-+TRACE_SYSCALL_TABLE(sys_sendmmsg, sys_sendmmsg, 345, 4)
-+#endif
-+
-+#endif /* CREATE_SYSCALL_TABLE */
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h
-new file mode 100644
-index 0000000..d35657c
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h
-@@ -0,0 +1,17 @@
-+#ifndef CONFIG_UID16
-+
-+#define OVERRIDE_32_sys_getgroups16
-+#define OVERRIDE_32_sys_setgroups16
-+#define OVERRIDE_32_sys_lchown16
-+#define OVERRIDE_32_sys_getresuid16
-+#define OVERRIDE_32_sys_getresgid16
-+#define OVERRIDE_32_sys_chown16
-+
-+#define OVERRIDE_TABLE_32_sys_getgroups16
-+#define OVERRIDE_TABLE_32_sys_setgroups16
-+#define OVERRIDE_TABLE_32_sys_lchown16
-+#define OVERRIDE_TABLE_32_sys_getresuid16
-+#define OVERRIDE_TABLE_32_sys_getresgid16
-+#define OVERRIDE_TABLE_32_sys_chown16
-+
-+#endif
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers.h
-new file mode 100644
-index 0000000..6d0dbb9
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers.h
-@@ -0,0 +1,1013 @@
-+/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
-+#ifndef CREATE_SYSCALL_TABLE
-+
-+#if !defined(_TRACE_SYSCALLS_INTEGERS_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_SYSCALLS_INTEGERS_H
-+
-+#include <linux/tracepoint.h>
-+#include <linux/syscalls.h>
-+#include "x86-64-syscalls-3.0.4_integers_override.h"
-+#include "syscalls_integers_override.h"
-+
-+SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,
-+ TP_STRUCT__entry(),
-+ TP_fast_assign(),
-+ TP_printk()
-+)
-+#ifndef OVERRIDE_64_sys_sched_yield
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sched_yield)
-+#endif
-+#ifndef OVERRIDE_64_sys_pause
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_pause)
-+#endif
-+#ifndef OVERRIDE_64_sys_getpid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpid)
-+#endif
-+#ifndef OVERRIDE_64_sys_getuid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid)
-+#endif
-+#ifndef OVERRIDE_64_sys_getgid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid)
-+#endif
-+#ifndef OVERRIDE_64_sys_geteuid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid)
-+#endif
-+#ifndef OVERRIDE_64_sys_getegid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid)
-+#endif
-+#ifndef OVERRIDE_64_sys_getppid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getppid)
-+#endif
-+#ifndef OVERRIDE_64_sys_getpgrp
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpgrp)
-+#endif
-+#ifndef OVERRIDE_64_sys_setsid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_setsid)
-+#endif
-+#ifndef OVERRIDE_64_sys_munlockall
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_munlockall)
-+#endif
-+#ifndef OVERRIDE_64_sys_vhangup
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_vhangup)
-+#endif
-+#ifndef OVERRIDE_64_sys_sync
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sync)
-+#endif
-+#ifndef OVERRIDE_64_sys_gettid
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_gettid)
-+#endif
-+#ifndef OVERRIDE_64_sys_restart_syscall
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_restart_syscall)
-+#endif
-+#ifndef OVERRIDE_64_sys_inotify_init
-+SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_inotify_init)
-+#endif
-+#ifndef OVERRIDE_64_sys_close
-+SC_TRACE_EVENT(sys_close,
-+ TP_PROTO(unsigned int fd),
-+ TP_ARGS(fd),
-+ TP_STRUCT__entry(__field(unsigned int, fd)),
-+ TP_fast_assign(tp_assign(fd, fd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_brk
-+SC_TRACE_EVENT(sys_brk,
-+ TP_PROTO(unsigned long brk),
-+ TP_ARGS(brk),
-+ TP_STRUCT__entry(__field(unsigned long, brk)),
-+ TP_fast_assign(tp_assign(brk, brk)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_dup
-+SC_TRACE_EVENT(sys_dup,
-+ TP_PROTO(unsigned int fildes),
-+ TP_ARGS(fildes),
-+ TP_STRUCT__entry(__field(unsigned int, fildes)),
-+ TP_fast_assign(tp_assign(fildes, fildes)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_alarm
-+SC_TRACE_EVENT(sys_alarm,
-+ TP_PROTO(unsigned int seconds),
-+ TP_ARGS(seconds),
-+ TP_STRUCT__entry(__field(unsigned int, seconds)),
-+ TP_fast_assign(tp_assign(seconds, seconds)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_exit
-+SC_TRACE_EVENT(sys_exit,
-+ TP_PROTO(int error_code),
-+ TP_ARGS(error_code),
-+ TP_STRUCT__entry(__field(int, error_code)),
-+ TP_fast_assign(tp_assign(error_code, error_code)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fsync
-+SC_TRACE_EVENT(sys_fsync,
-+ TP_PROTO(unsigned int fd),
-+ TP_ARGS(fd),
-+ TP_STRUCT__entry(__field(unsigned int, fd)),
-+ TP_fast_assign(tp_assign(fd, fd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fdatasync
-+SC_TRACE_EVENT(sys_fdatasync,
-+ TP_PROTO(unsigned int fd),
-+ TP_ARGS(fd),
-+ TP_STRUCT__entry(__field(unsigned int, fd)),
-+ TP_fast_assign(tp_assign(fd, fd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fchdir
-+SC_TRACE_EVENT(sys_fchdir,
-+ TP_PROTO(unsigned int fd),
-+ TP_ARGS(fd),
-+ TP_STRUCT__entry(__field(unsigned int, fd)),
-+ TP_fast_assign(tp_assign(fd, fd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_umask
-+SC_TRACE_EVENT(sys_umask,
-+ TP_PROTO(int mask),
-+ TP_ARGS(mask),
-+ TP_STRUCT__entry(__field(int, mask)),
-+ TP_fast_assign(tp_assign(mask, mask)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setuid
-+SC_TRACE_EVENT(sys_setuid,
-+ TP_PROTO(uid_t uid),
-+ TP_ARGS(uid),
-+ TP_STRUCT__entry(__field(uid_t, uid)),
-+ TP_fast_assign(tp_assign(uid, uid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setgid
-+SC_TRACE_EVENT(sys_setgid,
-+ TP_PROTO(gid_t gid),
-+ TP_ARGS(gid),
-+ TP_STRUCT__entry(__field(gid_t, gid)),
-+ TP_fast_assign(tp_assign(gid, gid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getpgid
-+SC_TRACE_EVENT(sys_getpgid,
-+ TP_PROTO(pid_t pid),
-+ TP_ARGS(pid),
-+ TP_STRUCT__entry(__field(pid_t, pid)),
-+ TP_fast_assign(tp_assign(pid, pid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setfsuid
-+SC_TRACE_EVENT(sys_setfsuid,
-+ TP_PROTO(uid_t uid),
-+ TP_ARGS(uid),
-+ TP_STRUCT__entry(__field(uid_t, uid)),
-+ TP_fast_assign(tp_assign(uid, uid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setfsgid
-+SC_TRACE_EVENT(sys_setfsgid,
-+ TP_PROTO(gid_t gid),
-+ TP_ARGS(gid),
-+ TP_STRUCT__entry(__field(gid_t, gid)),
-+ TP_fast_assign(tp_assign(gid, gid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getsid
-+SC_TRACE_EVENT(sys_getsid,
-+ TP_PROTO(pid_t pid),
-+ TP_ARGS(pid),
-+ TP_STRUCT__entry(__field(pid_t, pid)),
-+ TP_fast_assign(tp_assign(pid, pid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_personality
-+SC_TRACE_EVENT(sys_personality,
-+ TP_PROTO(unsigned int personality),
-+ TP_ARGS(personality),
-+ TP_STRUCT__entry(__field(unsigned int, personality)),
-+ TP_fast_assign(tp_assign(personality, personality)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sched_getscheduler
-+SC_TRACE_EVENT(sys_sched_getscheduler,
-+ TP_PROTO(pid_t pid),
-+ TP_ARGS(pid),
-+ TP_STRUCT__entry(__field(pid_t, pid)),
-+ TP_fast_assign(tp_assign(pid, pid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sched_get_priority_max
-+SC_TRACE_EVENT(sys_sched_get_priority_max,
-+ TP_PROTO(int policy),
-+ TP_ARGS(policy),
-+ TP_STRUCT__entry(__field(int, policy)),
-+ TP_fast_assign(tp_assign(policy, policy)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sched_get_priority_min
-+SC_TRACE_EVENT(sys_sched_get_priority_min,
-+ TP_PROTO(int policy),
-+ TP_ARGS(policy),
-+ TP_STRUCT__entry(__field(int, policy)),
-+ TP_fast_assign(tp_assign(policy, policy)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mlockall
-+SC_TRACE_EVENT(sys_mlockall,
-+ TP_PROTO(int flags),
-+ TP_ARGS(flags),
-+ TP_STRUCT__entry(__field(int, flags)),
-+ TP_fast_assign(tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_io_destroy
-+SC_TRACE_EVENT(sys_io_destroy,
-+ TP_PROTO(aio_context_t ctx),
-+ TP_ARGS(ctx),
-+ TP_STRUCT__entry(__field(aio_context_t, ctx)),
-+ TP_fast_assign(tp_assign(ctx, ctx)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_epoll_create
-+SC_TRACE_EVENT(sys_epoll_create,
-+ TP_PROTO(int size),
-+ TP_ARGS(size),
-+ TP_STRUCT__entry(__field(int, size)),
-+ TP_fast_assign(tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_timer_getoverrun
-+SC_TRACE_EVENT(sys_timer_getoverrun,
-+ TP_PROTO(timer_t timer_id),
-+ TP_ARGS(timer_id),
-+ TP_STRUCT__entry(__field(timer_t, timer_id)),
-+ TP_fast_assign(tp_assign(timer_id, timer_id)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_timer_delete
-+SC_TRACE_EVENT(sys_timer_delete,
-+ TP_PROTO(timer_t timer_id),
-+ TP_ARGS(timer_id),
-+ TP_STRUCT__entry(__field(timer_t, timer_id)),
-+ TP_fast_assign(tp_assign(timer_id, timer_id)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_exit_group
-+SC_TRACE_EVENT(sys_exit_group,
-+ TP_PROTO(int error_code),
-+ TP_ARGS(error_code),
-+ TP_STRUCT__entry(__field(int, error_code)),
-+ TP_fast_assign(tp_assign(error_code, error_code)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_unshare
-+SC_TRACE_EVENT(sys_unshare,
-+ TP_PROTO(unsigned long unshare_flags),
-+ TP_ARGS(unshare_flags),
-+ TP_STRUCT__entry(__field(unsigned long, unshare_flags)),
-+ TP_fast_assign(tp_assign(unshare_flags, unshare_flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_eventfd
-+SC_TRACE_EVENT(sys_eventfd,
-+ TP_PROTO(unsigned int count),
-+ TP_ARGS(count),
-+ TP_STRUCT__entry(__field(unsigned int, count)),
-+ TP_fast_assign(tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_epoll_create1
-+SC_TRACE_EVENT(sys_epoll_create1,
-+ TP_PROTO(int flags),
-+ TP_ARGS(flags),
-+ TP_STRUCT__entry(__field(int, flags)),
-+ TP_fast_assign(tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_inotify_init1
-+SC_TRACE_EVENT(sys_inotify_init1,
-+ TP_PROTO(int flags),
-+ TP_ARGS(flags),
-+ TP_STRUCT__entry(__field(int, flags)),
-+ TP_fast_assign(tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_syncfs
-+SC_TRACE_EVENT(sys_syncfs,
-+ TP_PROTO(int fd),
-+ TP_ARGS(fd),
-+ TP_STRUCT__entry(__field(int, fd)),
-+ TP_fast_assign(tp_assign(fd, fd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_munmap
-+SC_TRACE_EVENT(sys_munmap,
-+ TP_PROTO(unsigned long addr, size_t len),
-+ TP_ARGS(addr, len),
-+ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len)),
-+ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_dup2
-+SC_TRACE_EVENT(sys_dup2,
-+ TP_PROTO(unsigned int oldfd, unsigned int newfd),
-+ TP_ARGS(oldfd, newfd),
-+ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd)),
-+ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_shutdown
-+SC_TRACE_EVENT(sys_shutdown,
-+ TP_PROTO(int fd, int how),
-+ TP_ARGS(fd, how),
-+ TP_STRUCT__entry(__field(int, fd) __field(int, how)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(how, how)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_listen
-+SC_TRACE_EVENT(sys_listen,
-+ TP_PROTO(int fd, int backlog),
-+ TP_ARGS(fd, backlog),
-+ TP_STRUCT__entry(__field(int, fd) __field(int, backlog)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(backlog, backlog)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_kill
-+SC_TRACE_EVENT(sys_kill,
-+ TP_PROTO(pid_t pid, int sig),
-+ TP_ARGS(pid, sig),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_msgget
-+SC_TRACE_EVENT(sys_msgget,
-+ TP_PROTO(key_t key, int msgflg),
-+ TP_ARGS(key, msgflg),
-+ TP_STRUCT__entry(__field(key_t, key) __field(int, msgflg)),
-+ TP_fast_assign(tp_assign(key, key) tp_assign(msgflg, msgflg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_flock
-+SC_TRACE_EVENT(sys_flock,
-+ TP_PROTO(unsigned int fd, unsigned int cmd),
-+ TP_ARGS(fd, cmd),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_ftruncate
-+SC_TRACE_EVENT(sys_ftruncate,
-+ TP_PROTO(unsigned int fd, unsigned long length),
-+ TP_ARGS(fd, length),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, length)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(length, length)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fchmod
-+SC_TRACE_EVENT(sys_fchmod,
-+ TP_PROTO(unsigned int fd, mode_t mode),
-+ TP_ARGS(fd, mode),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(mode_t, mode)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setpgid
-+SC_TRACE_EVENT(sys_setpgid,
-+ TP_PROTO(pid_t pid, pid_t pgid),
-+ TP_ARGS(pid, pgid),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, pgid)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(pgid, pgid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setreuid
-+SC_TRACE_EVENT(sys_setreuid,
-+ TP_PROTO(uid_t ruid, uid_t euid),
-+ TP_ARGS(ruid, euid),
-+ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid)),
-+ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setregid
-+SC_TRACE_EVENT(sys_setregid,
-+ TP_PROTO(gid_t rgid, gid_t egid),
-+ TP_ARGS(rgid, egid),
-+ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid)),
-+ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getpriority
-+SC_TRACE_EVENT(sys_getpriority,
-+ TP_PROTO(int which, int who),
-+ TP_ARGS(which, who),
-+ TP_STRUCT__entry(__field(int, which) __field(int, who)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mlock
-+SC_TRACE_EVENT(sys_mlock,
-+ TP_PROTO(unsigned long start, size_t len),
-+ TP_ARGS(start, len),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_munlock
-+SC_TRACE_EVENT(sys_munlock,
-+ TP_PROTO(unsigned long start, size_t len),
-+ TP_ARGS(start, len),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_tkill
-+SC_TRACE_EVENT(sys_tkill,
-+ TP_PROTO(pid_t pid, int sig),
-+ TP_ARGS(pid, sig),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_ioprio_get
-+SC_TRACE_EVENT(sys_ioprio_get,
-+ TP_PROTO(int which, int who),
-+ TP_ARGS(which, who),
-+ TP_STRUCT__entry(__field(int, which) __field(int, who)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_inotify_rm_watch
-+SC_TRACE_EVENT(sys_inotify_rm_watch,
-+ TP_PROTO(int fd, __s32 wd),
-+ TP_ARGS(fd, wd),
-+ TP_STRUCT__entry(__field(int, fd) __field(__s32, wd)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(wd, wd)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_timerfd_create
-+SC_TRACE_EVENT(sys_timerfd_create,
-+ TP_PROTO(int clockid, int flags),
-+ TP_ARGS(clockid, flags),
-+ TP_STRUCT__entry(__field(int, clockid) __field(int, flags)),
-+ TP_fast_assign(tp_assign(clockid, clockid) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_eventfd2
-+SC_TRACE_EVENT(sys_eventfd2,
-+ TP_PROTO(unsigned int count, int flags),
-+ TP_ARGS(count, flags),
-+ TP_STRUCT__entry(__field(unsigned int, count) __field(int, flags)),
-+ TP_fast_assign(tp_assign(count, count) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setns
-+SC_TRACE_EVENT(sys_setns,
-+ TP_PROTO(int fd, int nstype),
-+ TP_ARGS(fd, nstype),
-+ TP_STRUCT__entry(__field(int, fd) __field(int, nstype)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(nstype, nstype)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_lseek
-+SC_TRACE_EVENT(sys_lseek,
-+ TP_PROTO(unsigned int fd, off_t offset, unsigned int origin),
-+ TP_ARGS(fd, offset, origin),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(off_t, offset) __field(unsigned int, origin)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(origin, origin)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mprotect
-+SC_TRACE_EVENT(sys_mprotect,
-+ TP_PROTO(unsigned long start, size_t len, unsigned long prot),
-+ TP_ARGS(start, len, prot),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(unsigned long, prot)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(prot, prot)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_ioctl
-+SC_TRACE_EVENT(sys_ioctl,
-+ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
-+ TP_ARGS(fd, cmd, arg),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_msync
-+SC_TRACE_EVENT(sys_msync,
-+ TP_PROTO(unsigned long start, size_t len, int flags),
-+ TP_ARGS(start, len, flags),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(int, flags)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_madvise
-+SC_TRACE_EVENT(sys_madvise,
-+ TP_PROTO(unsigned long start, size_t len_in, int behavior),
-+ TP_ARGS(start, len_in, behavior),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len_in) __field(int, behavior)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len_in, len_in) tp_assign(behavior, behavior)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_shmget
-+SC_TRACE_EVENT(sys_shmget,
-+ TP_PROTO(key_t key, size_t size, int shmflg),
-+ TP_ARGS(key, size, shmflg),
-+ TP_STRUCT__entry(__field(key_t, key) __field(size_t, size) __field(int, shmflg)),
-+ TP_fast_assign(tp_assign(key, key) tp_assign(size, size) tp_assign(shmflg, shmflg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_socket
-+SC_TRACE_EVENT(sys_socket,
-+ TP_PROTO(int family, int type, int protocol),
-+ TP_ARGS(family, type, protocol),
-+ TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol)),
-+ TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_semget
-+SC_TRACE_EVENT(sys_semget,
-+ TP_PROTO(key_t key, int nsems, int semflg),
-+ TP_ARGS(key, nsems, semflg),
-+ TP_STRUCT__entry(__field(key_t, key) __field(int, nsems) __field(int, semflg)),
-+ TP_fast_assign(tp_assign(key, key) tp_assign(nsems, nsems) tp_assign(semflg, semflg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fcntl
-+SC_TRACE_EVENT(sys_fcntl,
-+ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
-+ TP_ARGS(fd, cmd, arg),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fchown
-+SC_TRACE_EVENT(sys_fchown,
-+ TP_PROTO(unsigned int fd, uid_t user, gid_t group),
-+ TP_ARGS(fd, user, group),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field(uid_t, user) __field(gid_t, group)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setresuid
-+SC_TRACE_EVENT(sys_setresuid,
-+ TP_PROTO(uid_t ruid, uid_t euid, uid_t suid),
-+ TP_ARGS(ruid, euid, suid),
-+ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid) __field(uid_t, suid)),
-+ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setresgid
-+SC_TRACE_EVENT(sys_setresgid,
-+ TP_PROTO(gid_t rgid, gid_t egid, gid_t sgid),
-+ TP_ARGS(rgid, egid, sgid),
-+ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid) __field(gid_t, sgid)),
-+ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sysfs
-+SC_TRACE_EVENT(sys_sysfs,
-+ TP_PROTO(int option, unsigned long arg1, unsigned long arg2),
-+ TP_ARGS(option, arg1, arg2),
-+ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg1) __field(unsigned long, arg2)),
-+ TP_fast_assign(tp_assign(option, option) tp_assign(arg1, arg1) tp_assign(arg2, arg2)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setpriority
-+SC_TRACE_EVENT(sys_setpriority,
-+ TP_PROTO(int which, int who, int niceval),
-+ TP_ARGS(which, who, niceval),
-+ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, niceval)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(niceval, niceval)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_tgkill
-+SC_TRACE_EVENT(sys_tgkill,
-+ TP_PROTO(pid_t tgid, pid_t pid, int sig),
-+ TP_ARGS(tgid, pid, sig),
-+ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig)),
-+ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_ioprio_set
-+SC_TRACE_EVENT(sys_ioprio_set,
-+ TP_PROTO(int which, int who, int ioprio),
-+ TP_ARGS(which, who, ioprio),
-+ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, ioprio)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(ioprio, ioprio)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_dup3
-+SC_TRACE_EVENT(sys_dup3,
-+ TP_PROTO(unsigned int oldfd, unsigned int newfd, int flags),
-+ TP_ARGS(oldfd, newfd, flags),
-+ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd) __field(int, flags)),
-+ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_ptrace
-+SC_TRACE_EVENT(sys_ptrace,
-+ TP_PROTO(long request, long pid, unsigned long addr, unsigned long data),
-+ TP_ARGS(request, pid, addr, data),
-+ TP_STRUCT__entry(__field(long, request) __field(long, pid) __field_hex(unsigned long, addr) __field(unsigned long, data)),
-+ TP_fast_assign(tp_assign(request, request) tp_assign(pid, pid) tp_assign(addr, addr) tp_assign(data, data)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_tee
-+SC_TRACE_EVENT(sys_tee,
-+ TP_PROTO(int fdin, int fdout, size_t len, unsigned int flags),
-+ TP_ARGS(fdin, fdout, len, flags),
-+ TP_STRUCT__entry(__field(int, fdin) __field(int, fdout) __field(size_t, len) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_assign(fdin, fdin) tp_assign(fdout, fdout) tp_assign(len, len) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mremap
-+SC_TRACE_EVENT(sys_mremap,
-+ TP_PROTO(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr),
-+ TP_ARGS(addr, old_len, new_len, flags, new_addr),
-+ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, old_len) __field(unsigned long, new_len) __field(unsigned long, flags) __field_hex(unsigned long, new_addr)),
-+ TP_fast_assign(tp_assign(addr, addr) tp_assign(old_len, old_len) tp_assign(new_len, new_len) tp_assign(flags, flags) tp_assign(new_addr, new_addr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_prctl
-+SC_TRACE_EVENT(sys_prctl,
-+ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
-+ TP_ARGS(option, arg2, arg3, arg4, arg5),
-+ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
-+ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_remap_file_pages
-+SC_TRACE_EVENT(sys_remap_file_pages,
-+ TP_PROTO(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags),
-+ TP_ARGS(start, size, prot, pgoff, flags),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(unsigned long, size) __field(unsigned long, prot) __field(unsigned long, pgoff) __field(unsigned long, flags)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(size, size) tp_assign(prot, prot) tp_assign(pgoff, pgoff) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mmap
-+SC_TRACE_EVENT(sys_mmap,
-+ TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off),
-+ TP_ARGS(addr, len, prot, flags, fd, off),
-+ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, len) __field(unsigned long, prot) __field(unsigned long, flags) __field(unsigned long, fd) __field(unsigned long, off)),
-+ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(off, off)),
-+ TP_printk()
-+)
-+#endif
-+
-+#endif /* _TRACE_SYSCALLS_INTEGERS_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
-+
-+#else /* CREATE_SYSCALL_TABLE */
-+
-+#include "x86-64-syscalls-3.0.4_integers_override.h"
-+#include "syscalls_integers_override.h"
-+
-+#ifndef OVERRIDE_TABLE_64_sys_sched_yield
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sched_yield, 24, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_pause
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_pause, 34, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getpid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpid, 39, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getuid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid, 102, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getgid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid, 104, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_geteuid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid, 107, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getegid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid, 108, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getppid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getppid, 110, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getpgrp
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpgrp, 111, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setsid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_setsid, 112, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_munlockall
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_munlockall, 152, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_vhangup
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_vhangup, 153, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sync
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sync, 162, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_gettid
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_gettid, 186, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_restart_syscall
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_restart_syscall, 219, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_inotify_init
-+TRACE_SYSCALL_TABLE(syscalls_noargs, sys_inotify_init, 253, 0)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_close
-+TRACE_SYSCALL_TABLE(sys_close, sys_close, 3, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_lseek
-+TRACE_SYSCALL_TABLE(sys_lseek, sys_lseek, 8, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mmap
-+TRACE_SYSCALL_TABLE(sys_mmap, sys_mmap, 9, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mprotect
-+TRACE_SYSCALL_TABLE(sys_mprotect, sys_mprotect, 10, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_munmap
-+TRACE_SYSCALL_TABLE(sys_munmap, sys_munmap, 11, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_brk
-+TRACE_SYSCALL_TABLE(sys_brk, sys_brk, 12, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_ioctl
-+TRACE_SYSCALL_TABLE(sys_ioctl, sys_ioctl, 16, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mremap
-+TRACE_SYSCALL_TABLE(sys_mremap, sys_mremap, 25, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_msync
-+TRACE_SYSCALL_TABLE(sys_msync, sys_msync, 26, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_madvise
-+TRACE_SYSCALL_TABLE(sys_madvise, sys_madvise, 28, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_shmget
-+TRACE_SYSCALL_TABLE(sys_shmget, sys_shmget, 29, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_dup
-+TRACE_SYSCALL_TABLE(sys_dup, sys_dup, 32, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_dup2
-+TRACE_SYSCALL_TABLE(sys_dup2, sys_dup2, 33, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_alarm
-+TRACE_SYSCALL_TABLE(sys_alarm, sys_alarm, 37, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_socket
-+TRACE_SYSCALL_TABLE(sys_socket, sys_socket, 41, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_shutdown
-+TRACE_SYSCALL_TABLE(sys_shutdown, sys_shutdown, 48, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_listen
-+TRACE_SYSCALL_TABLE(sys_listen, sys_listen, 50, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_exit
-+TRACE_SYSCALL_TABLE(sys_exit, sys_exit, 60, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_kill
-+TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 62, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_semget
-+TRACE_SYSCALL_TABLE(sys_semget, sys_semget, 64, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_msgget
-+TRACE_SYSCALL_TABLE(sys_msgget, sys_msgget, 68, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fcntl
-+TRACE_SYSCALL_TABLE(sys_fcntl, sys_fcntl, 72, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_flock
-+TRACE_SYSCALL_TABLE(sys_flock, sys_flock, 73, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fsync
-+TRACE_SYSCALL_TABLE(sys_fsync, sys_fsync, 74, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fdatasync
-+TRACE_SYSCALL_TABLE(sys_fdatasync, sys_fdatasync, 75, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_ftruncate
-+TRACE_SYSCALL_TABLE(sys_ftruncate, sys_ftruncate, 77, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fchdir
-+TRACE_SYSCALL_TABLE(sys_fchdir, sys_fchdir, 81, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fchmod
-+TRACE_SYSCALL_TABLE(sys_fchmod, sys_fchmod, 91, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fchown
-+TRACE_SYSCALL_TABLE(sys_fchown, sys_fchown, 93, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_umask
-+TRACE_SYSCALL_TABLE(sys_umask, sys_umask, 95, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_ptrace
-+TRACE_SYSCALL_TABLE(sys_ptrace, sys_ptrace, 101, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setuid
-+TRACE_SYSCALL_TABLE(sys_setuid, sys_setuid, 105, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setgid
-+TRACE_SYSCALL_TABLE(sys_setgid, sys_setgid, 106, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setpgid
-+TRACE_SYSCALL_TABLE(sys_setpgid, sys_setpgid, 109, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setreuid
-+TRACE_SYSCALL_TABLE(sys_setreuid, sys_setreuid, 113, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setregid
-+TRACE_SYSCALL_TABLE(sys_setregid, sys_setregid, 114, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setresuid
-+TRACE_SYSCALL_TABLE(sys_setresuid, sys_setresuid, 117, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setresgid
-+TRACE_SYSCALL_TABLE(sys_setresgid, sys_setresgid, 119, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getpgid
-+TRACE_SYSCALL_TABLE(sys_getpgid, sys_getpgid, 121, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setfsuid
-+TRACE_SYSCALL_TABLE(sys_setfsuid, sys_setfsuid, 122, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setfsgid
-+TRACE_SYSCALL_TABLE(sys_setfsgid, sys_setfsgid, 123, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getsid
-+TRACE_SYSCALL_TABLE(sys_getsid, sys_getsid, 124, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_personality
-+TRACE_SYSCALL_TABLE(sys_personality, sys_personality, 135, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sysfs
-+TRACE_SYSCALL_TABLE(sys_sysfs, sys_sysfs, 139, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getpriority
-+TRACE_SYSCALL_TABLE(sys_getpriority, sys_getpriority, 140, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setpriority
-+TRACE_SYSCALL_TABLE(sys_setpriority, sys_setpriority, 141, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sched_getscheduler
-+TRACE_SYSCALL_TABLE(sys_sched_getscheduler, sys_sched_getscheduler, 145, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sched_get_priority_max
-+TRACE_SYSCALL_TABLE(sys_sched_get_priority_max, sys_sched_get_priority_max, 146, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sched_get_priority_min
-+TRACE_SYSCALL_TABLE(sys_sched_get_priority_min, sys_sched_get_priority_min, 147, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mlock
-+TRACE_SYSCALL_TABLE(sys_mlock, sys_mlock, 149, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_munlock
-+TRACE_SYSCALL_TABLE(sys_munlock, sys_munlock, 150, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mlockall
-+TRACE_SYSCALL_TABLE(sys_mlockall, sys_mlockall, 151, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_prctl
-+TRACE_SYSCALL_TABLE(sys_prctl, sys_prctl, 157, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_tkill
-+TRACE_SYSCALL_TABLE(sys_tkill, sys_tkill, 200, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_io_destroy
-+TRACE_SYSCALL_TABLE(sys_io_destroy, sys_io_destroy, 207, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_epoll_create
-+TRACE_SYSCALL_TABLE(sys_epoll_create, sys_epoll_create, 213, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_remap_file_pages
-+TRACE_SYSCALL_TABLE(sys_remap_file_pages, sys_remap_file_pages, 216, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_timer_getoverrun
-+TRACE_SYSCALL_TABLE(sys_timer_getoverrun, sys_timer_getoverrun, 225, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_timer_delete
-+TRACE_SYSCALL_TABLE(sys_timer_delete, sys_timer_delete, 226, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_exit_group
-+TRACE_SYSCALL_TABLE(sys_exit_group, sys_exit_group, 231, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_tgkill
-+TRACE_SYSCALL_TABLE(sys_tgkill, sys_tgkill, 234, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_ioprio_set
-+TRACE_SYSCALL_TABLE(sys_ioprio_set, sys_ioprio_set, 251, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_ioprio_get
-+TRACE_SYSCALL_TABLE(sys_ioprio_get, sys_ioprio_get, 252, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_inotify_rm_watch
-+TRACE_SYSCALL_TABLE(sys_inotify_rm_watch, sys_inotify_rm_watch, 255, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_unshare
-+TRACE_SYSCALL_TABLE(sys_unshare, sys_unshare, 272, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_tee
-+TRACE_SYSCALL_TABLE(sys_tee, sys_tee, 276, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_timerfd_create
-+TRACE_SYSCALL_TABLE(sys_timerfd_create, sys_timerfd_create, 283, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_eventfd
-+TRACE_SYSCALL_TABLE(sys_eventfd, sys_eventfd, 284, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_eventfd2
-+TRACE_SYSCALL_TABLE(sys_eventfd2, sys_eventfd2, 290, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_epoll_create1
-+TRACE_SYSCALL_TABLE(sys_epoll_create1, sys_epoll_create1, 291, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_dup3
-+TRACE_SYSCALL_TABLE(sys_dup3, sys_dup3, 292, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_inotify_init1
-+TRACE_SYSCALL_TABLE(sys_inotify_init1, sys_inotify_init1, 294, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_syncfs
-+TRACE_SYSCALL_TABLE(sys_syncfs, sys_syncfs, 306, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setns
-+TRACE_SYSCALL_TABLE(sys_setns, sys_setns, 308, 2)
-+#endif
-+
-+#endif /* CREATE_SYSCALL_TABLE */
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h
-new file mode 100644
-index 0000000..3d400f7
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h
-@@ -0,0 +1,3 @@
-+/*
-+ * this is a place-holder for x86_64 integer syscall definition override.
-+ */
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers.h
-new file mode 100644
-index 0000000..e926a60
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers.h
-@@ -0,0 +1,2076 @@
-+/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
-+#ifndef CREATE_SYSCALL_TABLE
-+
-+#if !defined(_TRACE_SYSCALLS_POINTERS_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_SYSCALLS_POINTERS_H
-+
-+#include <linux/tracepoint.h>
-+#include <linux/syscalls.h>
-+#include "x86-64-syscalls-3.0.4_pointers_override.h"
-+#include "syscalls_pointers_override.h"
-+
-+#ifndef OVERRIDE_64_sys_pipe
-+SC_TRACE_EVENT(sys_pipe,
-+ TP_PROTO(int * fildes),
-+ TP_ARGS(fildes),
-+ TP_STRUCT__entry(__field_hex(int *, fildes)),
-+ TP_fast_assign(tp_assign(fildes, fildes)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_newuname
-+SC_TRACE_EVENT(sys_newuname,
-+ TP_PROTO(struct new_utsname * name),
-+ TP_ARGS(name),
-+ TP_STRUCT__entry(__field_hex(struct new_utsname *, name)),
-+ TP_fast_assign(tp_assign(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_shmdt
-+SC_TRACE_EVENT(sys_shmdt,
-+ TP_PROTO(char * shmaddr),
-+ TP_ARGS(shmaddr),
-+ TP_STRUCT__entry(__field_hex(char *, shmaddr)),
-+ TP_fast_assign(tp_assign(shmaddr, shmaddr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_chdir
-+SC_TRACE_EVENT(sys_chdir,
-+ TP_PROTO(const char * filename),
-+ TP_ARGS(filename),
-+ TP_STRUCT__entry(__string_from_user(filename, filename)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_rmdir
-+SC_TRACE_EVENT(sys_rmdir,
-+ TP_PROTO(const char * pathname),
-+ TP_ARGS(pathname),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_unlink
-+SC_TRACE_EVENT(sys_unlink,
-+ TP_PROTO(const char * pathname),
-+ TP_ARGS(pathname),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sysinfo
-+SC_TRACE_EVENT(sys_sysinfo,
-+ TP_PROTO(struct sysinfo * info),
-+ TP_ARGS(info),
-+ TP_STRUCT__entry(__field_hex(struct sysinfo *, info)),
-+ TP_fast_assign(tp_assign(info, info)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_times
-+SC_TRACE_EVENT(sys_times,
-+ TP_PROTO(struct tms * tbuf),
-+ TP_ARGS(tbuf),
-+ TP_STRUCT__entry(__field_hex(struct tms *, tbuf)),
-+ TP_fast_assign(tp_assign(tbuf, tbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sysctl
-+SC_TRACE_EVENT(sys_sysctl,
-+ TP_PROTO(struct __sysctl_args * args),
-+ TP_ARGS(args),
-+ TP_STRUCT__entry(__field_hex(struct __sysctl_args *, args)),
-+ TP_fast_assign(tp_assign(args, args)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_adjtimex
-+SC_TRACE_EVENT(sys_adjtimex,
-+ TP_PROTO(struct timex * txc_p),
-+ TP_ARGS(txc_p),
-+ TP_STRUCT__entry(__field_hex(struct timex *, txc_p)),
-+ TP_fast_assign(tp_assign(txc_p, txc_p)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_chroot
-+SC_TRACE_EVENT(sys_chroot,
-+ TP_PROTO(const char * filename),
-+ TP_ARGS(filename),
-+ TP_STRUCT__entry(__string_from_user(filename, filename)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_swapoff
-+SC_TRACE_EVENT(sys_swapoff,
-+ TP_PROTO(const char * specialfile),
-+ TP_ARGS(specialfile),
-+ TP_STRUCT__entry(__string_from_user(specialfile, specialfile)),
-+ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_time
-+SC_TRACE_EVENT(sys_time,
-+ TP_PROTO(time_t * tloc),
-+ TP_ARGS(tloc),
-+ TP_STRUCT__entry(__field_hex(time_t *, tloc)),
-+ TP_fast_assign(tp_assign(tloc, tloc)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_set_tid_address
-+SC_TRACE_EVENT(sys_set_tid_address,
-+ TP_PROTO(int * tidptr),
-+ TP_ARGS(tidptr),
-+ TP_STRUCT__entry(__field_hex(int *, tidptr)),
-+ TP_fast_assign(tp_assign(tidptr, tidptr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mq_unlink
-+SC_TRACE_EVENT(sys_mq_unlink,
-+ TP_PROTO(const char * u_name),
-+ TP_ARGS(u_name),
-+ TP_STRUCT__entry(__string_from_user(u_name, u_name)),
-+ TP_fast_assign(tp_copy_string_from_user(u_name, u_name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_newstat
-+SC_TRACE_EVENT(sys_newstat,
-+ TP_PROTO(const char * filename, struct stat * statbuf),
-+ TP_ARGS(filename, statbuf),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_newfstat
-+SC_TRACE_EVENT(sys_newfstat,
-+ TP_PROTO(unsigned int fd, struct stat * statbuf),
-+ TP_ARGS(fd, statbuf),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct stat *, statbuf)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_newlstat
-+SC_TRACE_EVENT(sys_newlstat,
-+ TP_PROTO(const char * filename, struct stat * statbuf),
-+ TP_ARGS(filename, statbuf),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_access
-+SC_TRACE_EVENT(sys_access,
-+ TP_PROTO(const char * filename, int mode),
-+ TP_ARGS(filename, mode),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_nanosleep
-+SC_TRACE_EVENT(sys_nanosleep,
-+ TP_PROTO(struct timespec * rqtp, struct timespec * rmtp),
-+ TP_ARGS(rqtp, rmtp),
-+ TP_STRUCT__entry(__field_hex(struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
-+ TP_fast_assign(tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getitimer
-+SC_TRACE_EVENT(sys_getitimer,
-+ TP_PROTO(int which, struct itimerval * value),
-+ TP_ARGS(which, value),
-+ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(value, value)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_truncate
-+SC_TRACE_EVENT(sys_truncate,
-+ TP_PROTO(const char * path, long length),
-+ TP_ARGS(path, length),
-+ TP_STRUCT__entry(__string_from_user(path, path) __field(long, length)),
-+ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(length, length)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getcwd
-+SC_TRACE_EVENT(sys_getcwd,
-+ TP_PROTO(char * buf, unsigned long size),
-+ TP_ARGS(buf, size),
-+ TP_STRUCT__entry(__field_hex(char *, buf) __field(unsigned long, size)),
-+ TP_fast_assign(tp_assign(buf, buf) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_rename
-+SC_TRACE_EVENT(sys_rename,
-+ TP_PROTO(const char * oldname, const char * newname),
-+ TP_ARGS(oldname, newname),
-+ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
-+ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mkdir
-+SC_TRACE_EVENT(sys_mkdir,
-+ TP_PROTO(const char * pathname, int mode),
-+ TP_ARGS(pathname, mode),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_creat
-+SC_TRACE_EVENT(sys_creat,
-+ TP_PROTO(const char * pathname, int mode),
-+ TP_ARGS(pathname, mode),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_link
-+SC_TRACE_EVENT(sys_link,
-+ TP_PROTO(const char * oldname, const char * newname),
-+ TP_ARGS(oldname, newname),
-+ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
-+ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_symlink
-+SC_TRACE_EVENT(sys_symlink,
-+ TP_PROTO(const char * oldname, const char * newname),
-+ TP_ARGS(oldname, newname),
-+ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
-+ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_chmod
-+SC_TRACE_EVENT(sys_chmod,
-+ TP_PROTO(const char * filename, mode_t mode),
-+ TP_ARGS(filename, mode),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(mode_t, mode)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_gettimeofday
-+SC_TRACE_EVENT(sys_gettimeofday,
-+ TP_PROTO(struct timeval * tv, struct timezone * tz),
-+ TP_ARGS(tv, tz),
-+ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
-+ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getrlimit
-+SC_TRACE_EVENT(sys_getrlimit,
-+ TP_PROTO(unsigned int resource, struct rlimit * rlim),
-+ TP_ARGS(resource, rlim),
-+ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
-+ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getrusage
-+SC_TRACE_EVENT(sys_getrusage,
-+ TP_PROTO(int who, struct rusage * ru),
-+ TP_ARGS(who, ru),
-+ TP_STRUCT__entry(__field(int, who) __field_hex(struct rusage *, ru)),
-+ TP_fast_assign(tp_assign(who, who) tp_assign(ru, ru)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getgroups
-+SC_TRACE_EVENT(sys_getgroups,
-+ TP_PROTO(int gidsetsize, gid_t * grouplist),
-+ TP_ARGS(gidsetsize, grouplist),
-+ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
-+ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setgroups
-+SC_TRACE_EVENT(sys_setgroups,
-+ TP_PROTO(int gidsetsize, gid_t * grouplist),
-+ TP_ARGS(gidsetsize, grouplist),
-+ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
-+ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_rt_sigpending
-+SC_TRACE_EVENT(sys_rt_sigpending,
-+ TP_PROTO(sigset_t * set, size_t sigsetsize),
-+ TP_ARGS(set, sigsetsize),
-+ TP_STRUCT__entry(__field_hex(sigset_t *, set) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(set, set) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_rt_sigsuspend
-+SC_TRACE_EVENT(sys_rt_sigsuspend,
-+ TP_PROTO(sigset_t * unewset, size_t sigsetsize),
-+ TP_ARGS(unewset, sigsetsize),
-+ TP_STRUCT__entry(__field_hex(sigset_t *, unewset) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(unewset, unewset) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_utime
-+SC_TRACE_EVENT(sys_utime,
-+ TP_PROTO(char * filename, struct utimbuf * times),
-+ TP_ARGS(filename, times),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct utimbuf *, times)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(times, times)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_ustat
-+SC_TRACE_EVENT(sys_ustat,
-+ TP_PROTO(unsigned dev, struct ustat * ubuf),
-+ TP_ARGS(dev, ubuf),
-+ TP_STRUCT__entry(__field(unsigned, dev) __field_hex(struct ustat *, ubuf)),
-+ TP_fast_assign(tp_assign(dev, dev) tp_assign(ubuf, ubuf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_statfs
-+SC_TRACE_EVENT(sys_statfs,
-+ TP_PROTO(const char * pathname, struct statfs * buf),
-+ TP_ARGS(pathname, buf),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(struct statfs *, buf)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fstatfs
-+SC_TRACE_EVENT(sys_fstatfs,
-+ TP_PROTO(unsigned int fd, struct statfs * buf),
-+ TP_ARGS(fd, buf),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct statfs *, buf)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sched_setparam
-+SC_TRACE_EVENT(sys_sched_setparam,
-+ TP_PROTO(pid_t pid, struct sched_param * param),
-+ TP_ARGS(pid, param),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sched_getparam
-+SC_TRACE_EVENT(sys_sched_getparam,
-+ TP_PROTO(pid_t pid, struct sched_param * param),
-+ TP_ARGS(pid, param),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sched_rr_get_interval
-+SC_TRACE_EVENT(sys_sched_rr_get_interval,
-+ TP_PROTO(pid_t pid, struct timespec * interval),
-+ TP_ARGS(pid, interval),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct timespec *, interval)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_pivot_root
-+SC_TRACE_EVENT(sys_pivot_root,
-+ TP_PROTO(const char * new_root, const char * put_old),
-+ TP_ARGS(new_root, put_old),
-+ TP_STRUCT__entry(__string_from_user(new_root, new_root) __string_from_user(put_old, put_old)),
-+ TP_fast_assign(tp_copy_string_from_user(new_root, new_root) tp_copy_string_from_user(put_old, put_old)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setrlimit
-+SC_TRACE_EVENT(sys_setrlimit,
-+ TP_PROTO(unsigned int resource, struct rlimit * rlim),
-+ TP_ARGS(resource, rlim),
-+ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
-+ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_settimeofday
-+SC_TRACE_EVENT(sys_settimeofday,
-+ TP_PROTO(struct timeval * tv, struct timezone * tz),
-+ TP_ARGS(tv, tz),
-+ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
-+ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_umount
-+SC_TRACE_EVENT(sys_umount,
-+ TP_PROTO(char * name, int flags),
-+ TP_ARGS(name, flags),
-+ TP_STRUCT__entry(__string_from_user(name, name) __field(int, flags)),
-+ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_swapon
-+SC_TRACE_EVENT(sys_swapon,
-+ TP_PROTO(const char * specialfile, int swap_flags),
-+ TP_ARGS(specialfile, swap_flags),
-+ TP_STRUCT__entry(__string_from_user(specialfile, specialfile) __field(int, swap_flags)),
-+ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile) tp_assign(swap_flags, swap_flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sethostname
-+SC_TRACE_EVENT(sys_sethostname,
-+ TP_PROTO(char * name, int len),
-+ TP_ARGS(name, len),
-+ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
-+ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setdomainname
-+SC_TRACE_EVENT(sys_setdomainname,
-+ TP_PROTO(char * name, int len),
-+ TP_ARGS(name, len),
-+ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
-+ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_delete_module
-+SC_TRACE_EVENT(sys_delete_module,
-+ TP_PROTO(const char * name_user, unsigned int flags),
-+ TP_ARGS(name_user, flags),
-+ TP_STRUCT__entry(__string_from_user(name_user, name_user) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_copy_string_from_user(name_user, name_user) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_removexattr
-+SC_TRACE_EVENT(sys_removexattr,
-+ TP_PROTO(const char * pathname, const char * name),
-+ TP_ARGS(pathname, name),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_lremovexattr
-+SC_TRACE_EVENT(sys_lremovexattr,
-+ TP_PROTO(const char * pathname, const char * name),
-+ TP_ARGS(pathname, name),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fremovexattr
-+SC_TRACE_EVENT(sys_fremovexattr,
-+ TP_PROTO(int fd, const char * name),
-+ TP_ARGS(fd, name),
-+ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_io_setup
-+SC_TRACE_EVENT(sys_io_setup,
-+ TP_PROTO(unsigned nr_events, aio_context_t * ctxp),
-+ TP_ARGS(nr_events, ctxp),
-+ TP_STRUCT__entry(__field(unsigned, nr_events) __field_hex(aio_context_t *, ctxp)),
-+ TP_fast_assign(tp_assign(nr_events, nr_events) tp_assign(ctxp, ctxp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_timer_gettime
-+SC_TRACE_EVENT(sys_timer_gettime,
-+ TP_PROTO(timer_t timer_id, struct itimerspec * setting),
-+ TP_ARGS(timer_id, setting),
-+ TP_STRUCT__entry(__field(timer_t, timer_id) __field_hex(struct itimerspec *, setting)),
-+ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(setting, setting)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_clock_settime
-+SC_TRACE_EVENT(sys_clock_settime,
-+ TP_PROTO(const clockid_t which_clock, const struct timespec * tp),
-+ TP_ARGS(which_clock, tp),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(const struct timespec *, tp)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_clock_gettime
-+SC_TRACE_EVENT(sys_clock_gettime,
-+ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
-+ TP_ARGS(which_clock, tp),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_clock_getres
-+SC_TRACE_EVENT(sys_clock_getres,
-+ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
-+ TP_ARGS(which_clock, tp),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_utimes
-+SC_TRACE_EVENT(sys_utimes,
-+ TP_PROTO(char * filename, struct timeval * utimes),
-+ TP_ARGS(filename, utimes),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mq_notify
-+SC_TRACE_EVENT(sys_mq_notify,
-+ TP_PROTO(mqd_t mqdes, const struct sigevent * u_notification),
-+ TP_ARGS(mqdes, u_notification),
-+ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct sigevent *, u_notification)),
-+ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_notification, u_notification)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_set_robust_list
-+SC_TRACE_EVENT(sys_set_robust_list,
-+ TP_PROTO(struct robust_list_head * head, size_t len),
-+ TP_ARGS(head, len),
-+ TP_STRUCT__entry(__field_hex(struct robust_list_head *, head) __field(size_t, len)),
-+ TP_fast_assign(tp_assign(head, head) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_timerfd_gettime
-+SC_TRACE_EVENT(sys_timerfd_gettime,
-+ TP_PROTO(int ufd, struct itimerspec * otmr),
-+ TP_ARGS(ufd, otmr),
-+ TP_STRUCT__entry(__field(int, ufd) __field_hex(struct itimerspec *, otmr)),
-+ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(otmr, otmr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_pipe2
-+SC_TRACE_EVENT(sys_pipe2,
-+ TP_PROTO(int * fildes, int flags),
-+ TP_ARGS(fildes, flags),
-+ TP_STRUCT__entry(__field_hex(int *, fildes) __field(int, flags)),
-+ TP_fast_assign(tp_assign(fildes, fildes) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_clock_adjtime
-+SC_TRACE_EVENT(sys_clock_adjtime,
-+ TP_PROTO(const clockid_t which_clock, struct timex * utx),
-+ TP_ARGS(which_clock, utx),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timex *, utx)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(utx, utx)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_read
-+SC_TRACE_EVENT(sys_read,
-+ TP_PROTO(unsigned int fd, char * buf, size_t count),
-+ TP_ARGS(fd, buf, count),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_write
-+SC_TRACE_EVENT(sys_write,
-+ TP_PROTO(unsigned int fd, const char * buf, size_t count),
-+ TP_ARGS(fd, buf, count),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_open
-+SC_TRACE_EVENT(sys_open,
-+ TP_PROTO(const char * filename, int flags, int mode),
-+ TP_ARGS(filename, flags, mode),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_poll
-+SC_TRACE_EVENT(sys_poll,
-+ TP_PROTO(struct pollfd * ufds, unsigned int nfds, long timeout_msecs),
-+ TP_ARGS(ufds, nfds, timeout_msecs),
-+ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field(long, timeout_msecs)),
-+ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(timeout_msecs, timeout_msecs)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_readv
-+SC_TRACE_EVENT(sys_readv,
-+ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
-+ TP_ARGS(fd, vec, vlen),
-+ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_writev
-+SC_TRACE_EVENT(sys_writev,
-+ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
-+ TP_ARGS(fd, vec, vlen),
-+ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mincore
-+SC_TRACE_EVENT(sys_mincore,
-+ TP_PROTO(unsigned long start, size_t len, unsigned char * vec),
-+ TP_ARGS(start, len, vec),
-+ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field_hex(unsigned char *, vec)),
-+ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(vec, vec)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_shmat
-+SC_TRACE_EVENT(sys_shmat,
-+ TP_PROTO(int shmid, char * shmaddr, int shmflg),
-+ TP_ARGS(shmid, shmaddr, shmflg),
-+ TP_STRUCT__entry(__field(int, shmid) __field_hex(char *, shmaddr) __field(int, shmflg)),
-+ TP_fast_assign(tp_assign(shmid, shmid) tp_assign(shmaddr, shmaddr) tp_assign(shmflg, shmflg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_shmctl
-+SC_TRACE_EVENT(sys_shmctl,
-+ TP_PROTO(int shmid, int cmd, struct shmid_ds * buf),
-+ TP_ARGS(shmid, cmd, buf),
-+ TP_STRUCT__entry(__field(int, shmid) __field(int, cmd) __field_hex(struct shmid_ds *, buf)),
-+ TP_fast_assign(tp_assign(shmid, shmid) tp_assign(cmd, cmd) tp_assign(buf, buf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setitimer
-+SC_TRACE_EVENT(sys_setitimer,
-+ TP_PROTO(int which, struct itimerval * value, struct itimerval * ovalue),
-+ TP_ARGS(which, value, ovalue),
-+ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value) __field_hex(struct itimerval *, ovalue)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(value, value) tp_assign(ovalue, ovalue)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_connect
-+SC_TRACE_EVENT(sys_connect,
-+ TP_PROTO(int fd, struct sockaddr * uservaddr, int addrlen),
-+ TP_ARGS(fd, uservaddr, addrlen),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, uservaddr) __field_hex(int, addrlen)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(uservaddr, uservaddr) tp_assign(addrlen, addrlen)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_accept
-+SC_TRACE_EVENT(sys_accept,
-+ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen),
-+ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sendmsg
-+SC_TRACE_EVENT(sys_sendmsg,
-+ TP_PROTO(int fd, struct msghdr * msg, unsigned flags),
-+ TP_ARGS(fd, msg, flags),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned, flags)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_recvmsg
-+SC_TRACE_EVENT(sys_recvmsg,
-+ TP_PROTO(int fd, struct msghdr * msg, unsigned int flags),
-+ TP_ARGS(fd, msg, flags),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_bind
-+SC_TRACE_EVENT(sys_bind,
-+ TP_PROTO(int fd, struct sockaddr * umyaddr, int addrlen),
-+ TP_ARGS(fd, umyaddr, addrlen),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, umyaddr) __field_hex(int, addrlen)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(umyaddr, umyaddr) tp_assign(addrlen, addrlen)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getsockname
-+SC_TRACE_EVENT(sys_getsockname,
-+ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
-+ TP_ARGS(fd, usockaddr, usockaddr_len),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getpeername
-+SC_TRACE_EVENT(sys_getpeername,
-+ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
-+ TP_ARGS(fd, usockaddr, usockaddr_len),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_semop
-+SC_TRACE_EVENT(sys_semop,
-+ TP_PROTO(int semid, struct sembuf * tsops, unsigned nsops),
-+ TP_ARGS(semid, tsops, nsops),
-+ TP_STRUCT__entry(__field(int, semid) __field_hex(struct sembuf *, tsops) __field(unsigned, nsops)),
-+ TP_fast_assign(tp_assign(semid, semid) tp_assign(tsops, tsops) tp_assign(nsops, nsops)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_msgctl
-+SC_TRACE_EVENT(sys_msgctl,
-+ TP_PROTO(int msqid, int cmd, struct msqid_ds * buf),
-+ TP_ARGS(msqid, cmd, buf),
-+ TP_STRUCT__entry(__field(int, msqid) __field(int, cmd) __field_hex(struct msqid_ds *, buf)),
-+ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(cmd, cmd) tp_assign(buf, buf)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getdents
-+SC_TRACE_EVENT(sys_getdents,
-+ TP_PROTO(unsigned int fd, struct linux_dirent * dirent, unsigned int count),
-+ TP_ARGS(fd, dirent, count),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent *, dirent) __field(unsigned int, count)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_readlink
-+SC_TRACE_EVENT(sys_readlink,
-+ TP_PROTO(const char * path, char * buf, int bufsiz),
-+ TP_ARGS(path, buf, bufsiz),
-+ TP_STRUCT__entry(__string_from_user(path, path) __field_hex(char *, buf) __field(int, bufsiz)),
-+ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_chown
-+SC_TRACE_EVENT(sys_chown,
-+ TP_PROTO(const char * filename, uid_t user, gid_t group),
-+ TP_ARGS(filename, user, group),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_lchown
-+SC_TRACE_EVENT(sys_lchown,
-+ TP_PROTO(const char * filename, uid_t user, gid_t group),
-+ TP_ARGS(filename, user, group),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_syslog
-+SC_TRACE_EVENT(sys_syslog,
-+ TP_PROTO(int type, char * buf, int len),
-+ TP_ARGS(type, buf, len),
-+ TP_STRUCT__entry(__field(int, type) __field_hex(char *, buf) __field(int, len)),
-+ TP_fast_assign(tp_assign(type, type) tp_assign(buf, buf) tp_assign(len, len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getresuid
-+SC_TRACE_EVENT(sys_getresuid,
-+ TP_PROTO(uid_t * ruid, uid_t * euid, uid_t * suid),
-+ TP_ARGS(ruid, euid, suid),
-+ TP_STRUCT__entry(__field_hex(uid_t *, ruid) __field_hex(uid_t *, euid) __field_hex(uid_t *, suid)),
-+ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getresgid
-+SC_TRACE_EVENT(sys_getresgid,
-+ TP_PROTO(gid_t * rgid, gid_t * egid, gid_t * sgid),
-+ TP_ARGS(rgid, egid, sgid),
-+ TP_STRUCT__entry(__field_hex(gid_t *, rgid) __field_hex(gid_t *, egid) __field_hex(gid_t *, sgid)),
-+ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_rt_sigqueueinfo
-+SC_TRACE_EVENT(sys_rt_sigqueueinfo,
-+ TP_PROTO(pid_t pid, int sig, siginfo_t * uinfo),
-+ TP_ARGS(pid, sig, uinfo),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mknod
-+SC_TRACE_EVENT(sys_mknod,
-+ TP_PROTO(const char * filename, int mode, unsigned dev),
-+ TP_ARGS(filename, mode, dev),
-+ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sched_setscheduler
-+SC_TRACE_EVENT(sys_sched_setscheduler,
-+ TP_PROTO(pid_t pid, int policy, struct sched_param * param),
-+ TP_ARGS(pid, policy, param),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(int, policy) __field_hex(struct sched_param *, param)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(policy, policy) tp_assign(param, param)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_init_module
-+SC_TRACE_EVENT(sys_init_module,
-+ TP_PROTO(void * umod, unsigned long len, const char * uargs),
-+ TP_ARGS(umod, len, uargs),
-+ TP_STRUCT__entry(__field_hex(void *, umod) __field(unsigned long, len) __field_hex(const char *, uargs)),
-+ TP_fast_assign(tp_assign(umod, umod) tp_assign(len, len) tp_assign(uargs, uargs)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_nfsservctl
-+SC_TRACE_EVENT(sys_nfsservctl,
-+ TP_PROTO(int cmd, struct nfsctl_arg * arg, void * res),
-+ TP_ARGS(cmd, arg, res),
-+ TP_STRUCT__entry(__field(int, cmd) __field_hex(struct nfsctl_arg *, arg) __field_hex(void *, res)),
-+ TP_fast_assign(tp_assign(cmd, cmd) tp_assign(arg, arg) tp_assign(res, res)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_listxattr
-+SC_TRACE_EVENT(sys_listxattr,
-+ TP_PROTO(const char * pathname, char * list, size_t size),
-+ TP_ARGS(pathname, list, size),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_llistxattr
-+SC_TRACE_EVENT(sys_llistxattr,
-+ TP_PROTO(const char * pathname, char * list, size_t size),
-+ TP_ARGS(pathname, list, size),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_flistxattr
-+SC_TRACE_EVENT(sys_flistxattr,
-+ TP_PROTO(int fd, char * list, size_t size),
-+ TP_ARGS(fd, list, size),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(char *, list) __field(size_t, size)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(list, list) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sched_setaffinity
-+SC_TRACE_EVENT(sys_sched_setaffinity,
-+ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
-+ TP_ARGS(pid, len, user_mask_ptr),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sched_getaffinity
-+SC_TRACE_EVENT(sys_sched_getaffinity,
-+ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
-+ TP_ARGS(pid, len, user_mask_ptr),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_io_submit
-+SC_TRACE_EVENT(sys_io_submit,
-+ TP_PROTO(aio_context_t ctx_id, long nr, struct iocb * * iocbpp),
-+ TP_ARGS(ctx_id, nr, iocbpp),
-+ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, nr) __field_hex(struct iocb * *, iocbpp)),
-+ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(nr, nr) tp_assign(iocbpp, iocbpp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_io_cancel
-+SC_TRACE_EVENT(sys_io_cancel,
-+ TP_PROTO(aio_context_t ctx_id, struct iocb * iocb, struct io_event * result),
-+ TP_ARGS(ctx_id, iocb, result),
-+ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field_hex(struct iocb *, iocb) __field_hex(struct io_event *, result)),
-+ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(iocb, iocb) tp_assign(result, result)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getdents64
-+SC_TRACE_EVENT(sys_getdents64,
-+ TP_PROTO(unsigned int fd, struct linux_dirent64 * dirent, unsigned int count),
-+ TP_ARGS(fd, dirent, count),
-+ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent64 *, dirent) __field(unsigned int, count)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_timer_create
-+SC_TRACE_EVENT(sys_timer_create,
-+ TP_PROTO(const clockid_t which_clock, struct sigevent * timer_event_spec, timer_t * created_timer_id),
-+ TP_ARGS(which_clock, timer_event_spec, created_timer_id),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct sigevent *, timer_event_spec) __field_hex(timer_t *, created_timer_id)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(timer_event_spec, timer_event_spec) tp_assign(created_timer_id, created_timer_id)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mq_getsetattr
-+SC_TRACE_EVENT(sys_mq_getsetattr,
-+ TP_PROTO(mqd_t mqdes, const struct mq_attr * u_mqstat, struct mq_attr * u_omqstat),
-+ TP_ARGS(mqdes, u_mqstat, u_omqstat),
-+ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct mq_attr *, u_mqstat) __field_hex(struct mq_attr *, u_omqstat)),
-+ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_mqstat, u_mqstat) tp_assign(u_omqstat, u_omqstat)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_inotify_add_watch
-+SC_TRACE_EVENT(sys_inotify_add_watch,
-+ TP_PROTO(int fd, const char * pathname, u32 mask),
-+ TP_ARGS(fd, pathname, mask),
-+ TP_STRUCT__entry(__field(int, fd) __string_from_user(pathname, pathname) __field(u32, mask)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(pathname, pathname) tp_assign(mask, mask)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mkdirat
-+SC_TRACE_EVENT(sys_mkdirat,
-+ TP_PROTO(int dfd, const char * pathname, int mode),
-+ TP_ARGS(dfd, pathname, mode),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, mode)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_futimesat
-+SC_TRACE_EVENT(sys_futimesat,
-+ TP_PROTO(int dfd, const char * filename, struct timeval * utimes),
-+ TP_ARGS(dfd, filename, utimes),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_unlinkat
-+SC_TRACE_EVENT(sys_unlinkat,
-+ TP_PROTO(int dfd, const char * pathname, int flag),
-+ TP_ARGS(dfd, pathname, flag),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, flag)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(flag, flag)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_symlinkat
-+SC_TRACE_EVENT(sys_symlinkat,
-+ TP_PROTO(const char * oldname, int newdfd, const char * newname),
-+ TP_ARGS(oldname, newdfd, newname),
-+ TP_STRUCT__entry(__string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
-+ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fchmodat
-+SC_TRACE_EVENT(sys_fchmodat,
-+ TP_PROTO(int dfd, const char * filename, mode_t mode),
-+ TP_ARGS(dfd, filename, mode),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(mode_t, mode)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_faccessat
-+SC_TRACE_EVENT(sys_faccessat,
-+ TP_PROTO(int dfd, const char * filename, int mode),
-+ TP_ARGS(dfd, filename, mode),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_get_robust_list
-+SC_TRACE_EVENT(sys_get_robust_list,
-+ TP_PROTO(int pid, struct robust_list_head * * head_ptr, size_t * len_ptr),
-+ TP_ARGS(pid, head_ptr, len_ptr),
-+ TP_STRUCT__entry(__field(int, pid) __field_hex(struct robust_list_head * *, head_ptr) __field_hex(size_t *, len_ptr)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(head_ptr, head_ptr) tp_assign(len_ptr, len_ptr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_signalfd
-+SC_TRACE_EVENT(sys_signalfd,
-+ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask),
-+ TP_ARGS(ufd, user_mask, sizemask),
-+ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask)),
-+ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_rt_sigaction
-+SC_TRACE_EVENT(sys_rt_sigaction,
-+ TP_PROTO(int sig, const struct sigaction * act, struct sigaction * oact, size_t sigsetsize),
-+ TP_ARGS(sig, act, oact, sigsetsize),
-+ TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction *, act) __field_hex(struct sigaction *, oact) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_rt_sigprocmask
-+SC_TRACE_EVENT(sys_rt_sigprocmask,
-+ TP_PROTO(int how, sigset_t * nset, sigset_t * oset, size_t sigsetsize),
-+ TP_ARGS(how, nset, oset, sigsetsize),
-+ TP_STRUCT__entry(__field(int, how) __field_hex(sigset_t *, nset) __field_hex(sigset_t *, oset) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sendfile64
-+SC_TRACE_EVENT(sys_sendfile64,
-+ TP_PROTO(int out_fd, int in_fd, loff_t * offset, size_t count),
-+ TP_ARGS(out_fd, in_fd, offset, count),
-+ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(loff_t *, offset) __field(size_t, count)),
-+ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_socketpair
-+SC_TRACE_EVENT(sys_socketpair,
-+ TP_PROTO(int family, int type, int protocol, int * usockvec),
-+ TP_ARGS(family, type, protocol, usockvec),
-+ TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol) __field_hex(int *, usockvec)),
-+ TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol) tp_assign(usockvec, usockvec)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_wait4
-+SC_TRACE_EVENT(sys_wait4,
-+ TP_PROTO(pid_t upid, int * stat_addr, int options, struct rusage * ru),
-+ TP_ARGS(upid, stat_addr, options, ru),
-+ TP_STRUCT__entry(__field(pid_t, upid) __field_hex(int *, stat_addr) __field(int, options) __field_hex(struct rusage *, ru)),
-+ TP_fast_assign(tp_assign(upid, upid) tp_assign(stat_addr, stat_addr) tp_assign(options, options) tp_assign(ru, ru)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_msgsnd
-+SC_TRACE_EVENT(sys_msgsnd,
-+ TP_PROTO(int msqid, struct msgbuf * msgp, size_t msgsz, int msgflg),
-+ TP_ARGS(msqid, msgp, msgsz, msgflg),
-+ TP_STRUCT__entry(__field(int, msqid) __field_hex(struct msgbuf *, msgp) __field(size_t, msgsz) __field(int, msgflg)),
-+ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgflg, msgflg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_rt_sigtimedwait
-+SC_TRACE_EVENT(sys_rt_sigtimedwait,
-+ TP_PROTO(const sigset_t * uthese, siginfo_t * uinfo, const struct timespec * uts, size_t sigsetsize),
-+ TP_ARGS(uthese, uinfo, uts, sigsetsize),
-+ TP_STRUCT__entry(__field_hex(const sigset_t *, uthese) __field_hex(siginfo_t *, uinfo) __field_hex(const struct timespec *, uts) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(uthese, uthese) tp_assign(uinfo, uinfo) tp_assign(uts, uts) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_reboot
-+SC_TRACE_EVENT(sys_reboot,
-+ TP_PROTO(int magic1, int magic2, unsigned int cmd, void * arg),
-+ TP_ARGS(magic1, magic2, cmd, arg),
-+ TP_STRUCT__entry(__field(int, magic1) __field(int, magic2) __field(unsigned int, cmd) __field_hex(void *, arg)),
-+ TP_fast_assign(tp_assign(magic1, magic1) tp_assign(magic2, magic2) tp_assign(cmd, cmd) tp_assign(arg, arg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getxattr
-+SC_TRACE_EVENT(sys_getxattr,
-+ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
-+ TP_ARGS(pathname, name, value, size),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_lgetxattr
-+SC_TRACE_EVENT(sys_lgetxattr,
-+ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
-+ TP_ARGS(pathname, name, value, size),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fgetxattr
-+SC_TRACE_EVENT(sys_fgetxattr,
-+ TP_PROTO(int fd, const char * name, void * value, size_t size),
-+ TP_ARGS(fd, name, value, size),
-+ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_semtimedop
-+SC_TRACE_EVENT(sys_semtimedop,
-+ TP_PROTO(int semid, struct sembuf * tsops, unsigned nsops, const struct timespec * timeout),
-+ TP_ARGS(semid, tsops, nsops, timeout),
-+ TP_STRUCT__entry(__field(int, semid) __field_hex(struct sembuf *, tsops) __field(unsigned, nsops) __field_hex(const struct timespec *, timeout)),
-+ TP_fast_assign(tp_assign(semid, semid) tp_assign(tsops, tsops) tp_assign(nsops, nsops) tp_assign(timeout, timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_timer_settime
-+SC_TRACE_EVENT(sys_timer_settime,
-+ TP_PROTO(timer_t timer_id, int flags, const struct itimerspec * new_setting, struct itimerspec * old_setting),
-+ TP_ARGS(timer_id, flags, new_setting, old_setting),
-+ TP_STRUCT__entry(__field(timer_t, timer_id) __field(int, flags) __field_hex(const struct itimerspec *, new_setting) __field_hex(struct itimerspec *, old_setting)),
-+ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(flags, flags) tp_assign(new_setting, new_setting) tp_assign(old_setting, old_setting)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_clock_nanosleep
-+SC_TRACE_EVENT(sys_clock_nanosleep,
-+ TP_PROTO(const clockid_t which_clock, int flags, const struct timespec * rqtp, struct timespec * rmtp),
-+ TP_ARGS(which_clock, flags, rqtp, rmtp),
-+ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field(int, flags) __field_hex(const struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
-+ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(flags, flags) tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_epoll_wait
-+SC_TRACE_EVENT(sys_epoll_wait,
-+ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout),
-+ TP_ARGS(epfd, events, maxevents, timeout),
-+ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout)),
-+ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_epoll_ctl
-+SC_TRACE_EVENT(sys_epoll_ctl,
-+ TP_PROTO(int epfd, int op, int fd, struct epoll_event * event),
-+ TP_ARGS(epfd, op, fd, event),
-+ TP_STRUCT__entry(__field(int, epfd) __field(int, op) __field(int, fd) __field_hex(struct epoll_event *, event)),
-+ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(op, op) tp_assign(fd, fd) tp_assign(event, event)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mq_open
-+SC_TRACE_EVENT(sys_mq_open,
-+ TP_PROTO(const char * u_name, int oflag, mode_t mode, struct mq_attr * u_attr),
-+ TP_ARGS(u_name, oflag, mode, u_attr),
-+ TP_STRUCT__entry(__string_from_user(u_name, u_name) __field(int, oflag) __field(mode_t, mode) __field_hex(struct mq_attr *, u_attr)),
-+ TP_fast_assign(tp_copy_string_from_user(u_name, u_name) tp_assign(oflag, oflag) tp_assign(mode, mode) tp_assign(u_attr, u_attr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_kexec_load
-+SC_TRACE_EVENT(sys_kexec_load,
-+ TP_PROTO(unsigned long entry, unsigned long nr_segments, struct kexec_segment * segments, unsigned long flags),
-+ TP_ARGS(entry, nr_segments, segments, flags),
-+ TP_STRUCT__entry(__field(unsigned long, entry) __field(unsigned long, nr_segments) __field_hex(struct kexec_segment *, segments) __field(unsigned long, flags)),
-+ TP_fast_assign(tp_assign(entry, entry) tp_assign(nr_segments, nr_segments) tp_assign(segments, segments) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_openat
-+SC_TRACE_EVENT(sys_openat,
-+ TP_PROTO(int dfd, const char * filename, int flags, int mode),
-+ TP_ARGS(dfd, filename, flags, mode),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mknodat
-+SC_TRACE_EVENT(sys_mknodat,
-+ TP_PROTO(int dfd, const char * filename, int mode, unsigned dev),
-+ TP_ARGS(dfd, filename, mode, dev),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_newfstatat
-+SC_TRACE_EVENT(sys_newfstatat,
-+ TP_PROTO(int dfd, const char * filename, struct stat * statbuf, int flag),
-+ TP_ARGS(dfd, filename, statbuf, flag),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct stat *, statbuf) __field(int, flag)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf) tp_assign(flag, flag)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_renameat
-+SC_TRACE_EVENT(sys_renameat,
-+ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname),
-+ TP_ARGS(olddfd, oldname, newdfd, newname),
-+ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
-+ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_readlinkat
-+SC_TRACE_EVENT(sys_readlinkat,
-+ TP_PROTO(int dfd, const char * pathname, char * buf, int bufsiz),
-+ TP_ARGS(dfd, pathname, buf, bufsiz),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field_hex(char *, buf) __field(int, bufsiz)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_vmsplice
-+SC_TRACE_EVENT(sys_vmsplice,
-+ TP_PROTO(int fd, const struct iovec * iov, unsigned long nr_segs, unsigned int flags),
-+ TP_ARGS(fd, iov, nr_segs, flags),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(const struct iovec *, iov) __field(unsigned long, nr_segs) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(iov, iov) tp_assign(nr_segs, nr_segs) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_utimensat
-+SC_TRACE_EVENT(sys_utimensat,
-+ TP_PROTO(int dfd, const char * filename, struct timespec * utimes, int flags),
-+ TP_ARGS(dfd, filename, utimes, flags),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timespec *, utimes) __field(int, flags)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_timerfd_settime
-+SC_TRACE_EVENT(sys_timerfd_settime,
-+ TP_PROTO(int ufd, int flags, const struct itimerspec * utmr, struct itimerspec * otmr),
-+ TP_ARGS(ufd, flags, utmr, otmr),
-+ TP_STRUCT__entry(__field(int, ufd) __field(int, flags) __field_hex(const struct itimerspec *, utmr) __field_hex(struct itimerspec *, otmr)),
-+ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(flags, flags) tp_assign(utmr, utmr) tp_assign(otmr, otmr)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_accept4
-+SC_TRACE_EVENT(sys_accept4,
-+ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen, int flags),
-+ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen, flags),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen) __field(int, flags)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_signalfd4
-+SC_TRACE_EVENT(sys_signalfd4,
-+ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask, int flags),
-+ TP_ARGS(ufd, user_mask, sizemask, flags),
-+ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask) __field(int, flags)),
-+ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_rt_tgsigqueueinfo
-+SC_TRACE_EVENT(sys_rt_tgsigqueueinfo,
-+ TP_PROTO(pid_t tgid, pid_t pid, int sig, siginfo_t * uinfo),
-+ TP_ARGS(tgid, pid, sig, uinfo),
-+ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
-+ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_prlimit64
-+SC_TRACE_EVENT(sys_prlimit64,
-+ TP_PROTO(pid_t pid, unsigned int resource, const struct rlimit64 * new_rlim, struct rlimit64 * old_rlim),
-+ TP_ARGS(pid, resource, new_rlim, old_rlim),
-+ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, resource) __field_hex(const struct rlimit64 *, new_rlim) __field_hex(struct rlimit64 *, old_rlim)),
-+ TP_fast_assign(tp_assign(pid, pid) tp_assign(resource, resource) tp_assign(new_rlim, new_rlim) tp_assign(old_rlim, old_rlim)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sendmmsg
-+SC_TRACE_EVENT(sys_sendmmsg,
-+ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags),
-+ TP_ARGS(fd, mmsg, vlen, flags),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_select
-+SC_TRACE_EVENT(sys_select,
-+ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timeval * tvp),
-+ TP_ARGS(n, inp, outp, exp, tvp),
-+ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timeval *, tvp)),
-+ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tvp, tvp)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setsockopt
-+SC_TRACE_EVENT(sys_setsockopt,
-+ TP_PROTO(int fd, int level, int optname, char * optval, int optlen),
-+ TP_ARGS(fd, level, optname, optval, optlen),
-+ TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field(int, optlen)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_getsockopt
-+SC_TRACE_EVENT(sys_getsockopt,
-+ TP_PROTO(int fd, int level, int optname, char * optval, int * optlen),
-+ TP_ARGS(fd, level, optname, optval, optlen),
-+ TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field_hex(int *, optlen)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_msgrcv
-+SC_TRACE_EVENT(sys_msgrcv,
-+ TP_PROTO(int msqid, struct msgbuf * msgp, size_t msgsz, long msgtyp, int msgflg),
-+ TP_ARGS(msqid, msgp, msgsz, msgtyp, msgflg),
-+ TP_STRUCT__entry(__field(int, msqid) __field_hex(struct msgbuf *, msgp) __field(size_t, msgsz) __field(long, msgtyp) __field(int, msgflg)),
-+ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgtyp, msgtyp) tp_assign(msgflg, msgflg)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mount
-+SC_TRACE_EVENT(sys_mount,
-+ TP_PROTO(char * dev_name, char * dir_name, char * type, unsigned long flags, void * data),
-+ TP_ARGS(dev_name, dir_name, type, flags, data),
-+ TP_STRUCT__entry(__string_from_user(dev_name, dev_name) __string_from_user(dir_name, dir_name) __string_from_user(type, type) __field(unsigned long, flags) __field_hex(void *, data)),
-+ TP_fast_assign(tp_copy_string_from_user(dev_name, dev_name) tp_copy_string_from_user(dir_name, dir_name) tp_copy_string_from_user(type, type) tp_assign(flags, flags) tp_assign(data, data)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_setxattr
-+SC_TRACE_EVENT(sys_setxattr,
-+ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
-+ TP_ARGS(pathname, name, value, size, flags),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_lsetxattr
-+SC_TRACE_EVENT(sys_lsetxattr,
-+ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
-+ TP_ARGS(pathname, name, value, size, flags),
-+ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
-+ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fsetxattr
-+SC_TRACE_EVENT(sys_fsetxattr,
-+ TP_PROTO(int fd, const char * name, const void * value, size_t size, int flags),
-+ TP_ARGS(fd, name, value, size, flags),
-+ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_io_getevents
-+SC_TRACE_EVENT(sys_io_getevents,
-+ TP_PROTO(aio_context_t ctx_id, long min_nr, long nr, struct io_event * events, struct timespec * timeout),
-+ TP_ARGS(ctx_id, min_nr, nr, events, timeout),
-+ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, min_nr) __field(long, nr) __field_hex(struct io_event *, events) __field_hex(struct timespec *, timeout)),
-+ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(min_nr, min_nr) tp_assign(nr, nr) tp_assign(events, events) tp_assign(timeout, timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mq_timedsend
-+SC_TRACE_EVENT(sys_mq_timedsend,
-+ TP_PROTO(mqd_t mqdes, const char * u_msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec * u_abs_timeout),
-+ TP_ARGS(mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout),
-+ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const char *, u_msg_ptr) __field(size_t, msg_len) __field(unsigned int, msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
-+ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(msg_prio, msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_mq_timedreceive
-+SC_TRACE_EVENT(sys_mq_timedreceive,
-+ TP_PROTO(mqd_t mqdes, char * u_msg_ptr, size_t msg_len, unsigned int * u_msg_prio, const struct timespec * u_abs_timeout),
-+ TP_ARGS(mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout),
-+ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(char *, u_msg_ptr) __field(size_t, msg_len) __field_hex(unsigned int *, u_msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
-+ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(u_msg_prio, u_msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_waitid
-+SC_TRACE_EVENT(sys_waitid,
-+ TP_PROTO(int which, pid_t upid, struct siginfo * infop, int options, struct rusage * ru),
-+ TP_ARGS(which, upid, infop, options, ru),
-+ TP_STRUCT__entry(__field(int, which) __field(pid_t, upid) __field_hex(struct siginfo *, infop) __field(int, options) __field_hex(struct rusage *, ru)),
-+ TP_fast_assign(tp_assign(which, which) tp_assign(upid, upid) tp_assign(infop, infop) tp_assign(options, options) tp_assign(ru, ru)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_fchownat
-+SC_TRACE_EVENT(sys_fchownat,
-+ TP_PROTO(int dfd, const char * filename, uid_t user, gid_t group, int flag),
-+ TP_ARGS(dfd, filename, user, group, flag),
-+ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group) __field(int, flag)),
-+ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group) tp_assign(flag, flag)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_linkat
-+SC_TRACE_EVENT(sys_linkat,
-+ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname, int flags),
-+ TP_ARGS(olddfd, oldname, newdfd, newname, flags),
-+ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname) __field(int, flags)),
-+ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_ppoll
-+SC_TRACE_EVENT(sys_ppoll,
-+ TP_PROTO(struct pollfd * ufds, unsigned int nfds, struct timespec * tsp, const sigset_t * sigmask, size_t sigsetsize),
-+ TP_ARGS(ufds, nfds, tsp, sigmask, sigsetsize),
-+ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field_hex(struct timespec *, tsp) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(tsp, tsp) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_preadv
-+SC_TRACE_EVENT(sys_preadv,
-+ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
-+ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
-+ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_pwritev
-+SC_TRACE_EVENT(sys_pwritev,
-+ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
-+ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
-+ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_perf_event_open
-+SC_TRACE_EVENT(sys_perf_event_open,
-+ TP_PROTO(struct perf_event_attr * attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags),
-+ TP_ARGS(attr_uptr, pid, cpu, group_fd, flags),
-+ TP_STRUCT__entry(__field_hex(struct perf_event_attr *, attr_uptr) __field(pid_t, pid) __field(int, cpu) __field(int, group_fd) __field(unsigned long, flags)),
-+ TP_fast_assign(tp_assign(attr_uptr, attr_uptr) tp_assign(pid, pid) tp_assign(cpu, cpu) tp_assign(group_fd, group_fd) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_recvmmsg
-+SC_TRACE_EVENT(sys_recvmmsg,
-+ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags, struct timespec * timeout),
-+ TP_ARGS(fd, mmsg, vlen, flags, timeout),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags) __field_hex(struct timespec *, timeout)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags) tp_assign(timeout, timeout)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_sendto
-+SC_TRACE_EVENT(sys_sendto,
-+ TP_PROTO(int fd, void * buff, size_t len, unsigned flags, struct sockaddr * addr, int addr_len),
-+ TP_ARGS(fd, buff, len, flags, addr, addr_len),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, buff) __field(size_t, len) __field(unsigned, flags) __field_hex(struct sockaddr *, addr) __field_hex(int, addr_len)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(buff, buff) tp_assign(len, len) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_recvfrom
-+SC_TRACE_EVENT(sys_recvfrom,
-+ TP_PROTO(int fd, void * ubuf, size_t size, unsigned flags, struct sockaddr * addr, int * addr_len),
-+ TP_ARGS(fd, ubuf, size, flags, addr, addr_len),
-+ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, ubuf) __field(size_t, size) __field(unsigned, flags) __field_hex(struct sockaddr *, addr) __field_hex(int *, addr_len)),
-+ TP_fast_assign(tp_assign(fd, fd) tp_assign(ubuf, ubuf) tp_assign(size, size) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_futex
-+SC_TRACE_EVENT(sys_futex,
-+ TP_PROTO(u32 * uaddr, int op, u32 val, struct timespec * utime, u32 * uaddr2, u32 val3),
-+ TP_ARGS(uaddr, op, val, utime, uaddr2, val3),
-+ TP_STRUCT__entry(__field_hex(u32 *, uaddr) __field(int, op) __field(u32, val) __field_hex(struct timespec *, utime) __field_hex(u32 *, uaddr2) __field(u32, val3)),
-+ TP_fast_assign(tp_assign(uaddr, uaddr) tp_assign(op, op) tp_assign(val, val) tp_assign(utime, utime) tp_assign(uaddr2, uaddr2) tp_assign(val3, val3)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_pselect6
-+SC_TRACE_EVENT(sys_pselect6,
-+ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timespec * tsp, void * sig),
-+ TP_ARGS(n, inp, outp, exp, tsp, sig),
-+ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timespec *, tsp) __field_hex(void *, sig)),
-+ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tsp, tsp) tp_assign(sig, sig)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_splice
-+SC_TRACE_EVENT(sys_splice,
-+ TP_PROTO(int fd_in, loff_t * off_in, int fd_out, loff_t * off_out, size_t len, unsigned int flags),
-+ TP_ARGS(fd_in, off_in, fd_out, off_out, len, flags),
-+ TP_STRUCT__entry(__field(int, fd_in) __field_hex(loff_t *, off_in) __field(int, fd_out) __field_hex(loff_t *, off_out) __field(size_t, len) __field(unsigned int, flags)),
-+ TP_fast_assign(tp_assign(fd_in, fd_in) tp_assign(off_in, off_in) tp_assign(fd_out, fd_out) tp_assign(off_out, off_out) tp_assign(len, len) tp_assign(flags, flags)),
-+ TP_printk()
-+)
-+#endif
-+#ifndef OVERRIDE_64_sys_epoll_pwait
-+SC_TRACE_EVENT(sys_epoll_pwait,
-+ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout, const sigset_t * sigmask, size_t sigsetsize),
-+ TP_ARGS(epfd, events, maxevents, timeout, sigmask, sigsetsize),
-+ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
-+ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
-+ TP_printk()
-+)
-+#endif
-+
-+#endif /* _TRACE_SYSCALLS_POINTERS_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
-+
-+#else /* CREATE_SYSCALL_TABLE */
-+
-+#include "x86-64-syscalls-3.0.4_pointers_override.h"
-+#include "syscalls_pointers_override.h"
-+
-+#ifndef OVERRIDE_TABLE_64_sys_read
-+TRACE_SYSCALL_TABLE(sys_read, sys_read, 0, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_write
-+TRACE_SYSCALL_TABLE(sys_write, sys_write, 1, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_open
-+TRACE_SYSCALL_TABLE(sys_open, sys_open, 2, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_newstat
-+TRACE_SYSCALL_TABLE(sys_newstat, sys_newstat, 4, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_newfstat
-+TRACE_SYSCALL_TABLE(sys_newfstat, sys_newfstat, 5, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_newlstat
-+TRACE_SYSCALL_TABLE(sys_newlstat, sys_newlstat, 6, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_poll
-+TRACE_SYSCALL_TABLE(sys_poll, sys_poll, 7, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_rt_sigaction
-+TRACE_SYSCALL_TABLE(sys_rt_sigaction, sys_rt_sigaction, 13, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_rt_sigprocmask
-+TRACE_SYSCALL_TABLE(sys_rt_sigprocmask, sys_rt_sigprocmask, 14, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_readv
-+TRACE_SYSCALL_TABLE(sys_readv, sys_readv, 19, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_writev
-+TRACE_SYSCALL_TABLE(sys_writev, sys_writev, 20, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_access
-+TRACE_SYSCALL_TABLE(sys_access, sys_access, 21, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_pipe
-+TRACE_SYSCALL_TABLE(sys_pipe, sys_pipe, 22, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_select
-+TRACE_SYSCALL_TABLE(sys_select, sys_select, 23, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mincore
-+TRACE_SYSCALL_TABLE(sys_mincore, sys_mincore, 27, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_shmat
-+TRACE_SYSCALL_TABLE(sys_shmat, sys_shmat, 30, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_shmctl
-+TRACE_SYSCALL_TABLE(sys_shmctl, sys_shmctl, 31, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_nanosleep
-+TRACE_SYSCALL_TABLE(sys_nanosleep, sys_nanosleep, 35, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getitimer
-+TRACE_SYSCALL_TABLE(sys_getitimer, sys_getitimer, 36, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setitimer
-+TRACE_SYSCALL_TABLE(sys_setitimer, sys_setitimer, 38, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sendfile64
-+TRACE_SYSCALL_TABLE(sys_sendfile64, sys_sendfile64, 40, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_connect
-+TRACE_SYSCALL_TABLE(sys_connect, sys_connect, 42, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_accept
-+TRACE_SYSCALL_TABLE(sys_accept, sys_accept, 43, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sendto
-+TRACE_SYSCALL_TABLE(sys_sendto, sys_sendto, 44, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_recvfrom
-+TRACE_SYSCALL_TABLE(sys_recvfrom, sys_recvfrom, 45, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sendmsg
-+TRACE_SYSCALL_TABLE(sys_sendmsg, sys_sendmsg, 46, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_recvmsg
-+TRACE_SYSCALL_TABLE(sys_recvmsg, sys_recvmsg, 47, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_bind
-+TRACE_SYSCALL_TABLE(sys_bind, sys_bind, 49, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getsockname
-+TRACE_SYSCALL_TABLE(sys_getsockname, sys_getsockname, 51, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getpeername
-+TRACE_SYSCALL_TABLE(sys_getpeername, sys_getpeername, 52, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_socketpair
-+TRACE_SYSCALL_TABLE(sys_socketpair, sys_socketpair, 53, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setsockopt
-+TRACE_SYSCALL_TABLE(sys_setsockopt, sys_setsockopt, 54, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getsockopt
-+TRACE_SYSCALL_TABLE(sys_getsockopt, sys_getsockopt, 55, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_wait4
-+TRACE_SYSCALL_TABLE(sys_wait4, sys_wait4, 61, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_newuname
-+TRACE_SYSCALL_TABLE(sys_newuname, sys_newuname, 63, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_semop
-+TRACE_SYSCALL_TABLE(sys_semop, sys_semop, 65, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_shmdt
-+TRACE_SYSCALL_TABLE(sys_shmdt, sys_shmdt, 67, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_msgsnd
-+TRACE_SYSCALL_TABLE(sys_msgsnd, sys_msgsnd, 69, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_msgrcv
-+TRACE_SYSCALL_TABLE(sys_msgrcv, sys_msgrcv, 70, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_msgctl
-+TRACE_SYSCALL_TABLE(sys_msgctl, sys_msgctl, 71, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_truncate
-+TRACE_SYSCALL_TABLE(sys_truncate, sys_truncate, 76, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getdents
-+TRACE_SYSCALL_TABLE(sys_getdents, sys_getdents, 78, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getcwd
-+TRACE_SYSCALL_TABLE(sys_getcwd, sys_getcwd, 79, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_chdir
-+TRACE_SYSCALL_TABLE(sys_chdir, sys_chdir, 80, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_rename
-+TRACE_SYSCALL_TABLE(sys_rename, sys_rename, 82, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mkdir
-+TRACE_SYSCALL_TABLE(sys_mkdir, sys_mkdir, 83, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_rmdir
-+TRACE_SYSCALL_TABLE(sys_rmdir, sys_rmdir, 84, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_creat
-+TRACE_SYSCALL_TABLE(sys_creat, sys_creat, 85, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_link
-+TRACE_SYSCALL_TABLE(sys_link, sys_link, 86, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_unlink
-+TRACE_SYSCALL_TABLE(sys_unlink, sys_unlink, 87, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_symlink
-+TRACE_SYSCALL_TABLE(sys_symlink, sys_symlink, 88, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_readlink
-+TRACE_SYSCALL_TABLE(sys_readlink, sys_readlink, 89, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_chmod
-+TRACE_SYSCALL_TABLE(sys_chmod, sys_chmod, 90, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_chown
-+TRACE_SYSCALL_TABLE(sys_chown, sys_chown, 92, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_lchown
-+TRACE_SYSCALL_TABLE(sys_lchown, sys_lchown, 94, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_gettimeofday
-+TRACE_SYSCALL_TABLE(sys_gettimeofday, sys_gettimeofday, 96, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getrlimit
-+TRACE_SYSCALL_TABLE(sys_getrlimit, sys_getrlimit, 97, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getrusage
-+TRACE_SYSCALL_TABLE(sys_getrusage, sys_getrusage, 98, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sysinfo
-+TRACE_SYSCALL_TABLE(sys_sysinfo, sys_sysinfo, 99, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_times
-+TRACE_SYSCALL_TABLE(sys_times, sys_times, 100, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_syslog
-+TRACE_SYSCALL_TABLE(sys_syslog, sys_syslog, 103, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getgroups
-+TRACE_SYSCALL_TABLE(sys_getgroups, sys_getgroups, 115, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setgroups
-+TRACE_SYSCALL_TABLE(sys_setgroups, sys_setgroups, 116, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getresuid
-+TRACE_SYSCALL_TABLE(sys_getresuid, sys_getresuid, 118, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getresgid
-+TRACE_SYSCALL_TABLE(sys_getresgid, sys_getresgid, 120, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_rt_sigpending
-+TRACE_SYSCALL_TABLE(sys_rt_sigpending, sys_rt_sigpending, 127, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_rt_sigtimedwait
-+TRACE_SYSCALL_TABLE(sys_rt_sigtimedwait, sys_rt_sigtimedwait, 128, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_rt_sigqueueinfo
-+TRACE_SYSCALL_TABLE(sys_rt_sigqueueinfo, sys_rt_sigqueueinfo, 129, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_rt_sigsuspend
-+TRACE_SYSCALL_TABLE(sys_rt_sigsuspend, sys_rt_sigsuspend, 130, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_utime
-+TRACE_SYSCALL_TABLE(sys_utime, sys_utime, 132, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mknod
-+TRACE_SYSCALL_TABLE(sys_mknod, sys_mknod, 133, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_ustat
-+TRACE_SYSCALL_TABLE(sys_ustat, sys_ustat, 136, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_statfs
-+TRACE_SYSCALL_TABLE(sys_statfs, sys_statfs, 137, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fstatfs
-+TRACE_SYSCALL_TABLE(sys_fstatfs, sys_fstatfs, 138, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sched_setparam
-+TRACE_SYSCALL_TABLE(sys_sched_setparam, sys_sched_setparam, 142, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sched_getparam
-+TRACE_SYSCALL_TABLE(sys_sched_getparam, sys_sched_getparam, 143, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sched_setscheduler
-+TRACE_SYSCALL_TABLE(sys_sched_setscheduler, sys_sched_setscheduler, 144, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sched_rr_get_interval
-+TRACE_SYSCALL_TABLE(sys_sched_rr_get_interval, sys_sched_rr_get_interval, 148, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_pivot_root
-+TRACE_SYSCALL_TABLE(sys_pivot_root, sys_pivot_root, 155, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sysctl
-+TRACE_SYSCALL_TABLE(sys_sysctl, sys_sysctl, 156, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_adjtimex
-+TRACE_SYSCALL_TABLE(sys_adjtimex, sys_adjtimex, 159, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setrlimit
-+TRACE_SYSCALL_TABLE(sys_setrlimit, sys_setrlimit, 160, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_chroot
-+TRACE_SYSCALL_TABLE(sys_chroot, sys_chroot, 161, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_settimeofday
-+TRACE_SYSCALL_TABLE(sys_settimeofday, sys_settimeofday, 164, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mount
-+TRACE_SYSCALL_TABLE(sys_mount, sys_mount, 165, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_umount
-+TRACE_SYSCALL_TABLE(sys_umount, sys_umount, 166, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_swapon
-+TRACE_SYSCALL_TABLE(sys_swapon, sys_swapon, 167, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_swapoff
-+TRACE_SYSCALL_TABLE(sys_swapoff, sys_swapoff, 168, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_reboot
-+TRACE_SYSCALL_TABLE(sys_reboot, sys_reboot, 169, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sethostname
-+TRACE_SYSCALL_TABLE(sys_sethostname, sys_sethostname, 170, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setdomainname
-+TRACE_SYSCALL_TABLE(sys_setdomainname, sys_setdomainname, 171, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_init_module
-+TRACE_SYSCALL_TABLE(sys_init_module, sys_init_module, 175, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_delete_module
-+TRACE_SYSCALL_TABLE(sys_delete_module, sys_delete_module, 176, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_nfsservctl
-+TRACE_SYSCALL_TABLE(sys_nfsservctl, sys_nfsservctl, 180, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_setxattr
-+TRACE_SYSCALL_TABLE(sys_setxattr, sys_setxattr, 188, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_lsetxattr
-+TRACE_SYSCALL_TABLE(sys_lsetxattr, sys_lsetxattr, 189, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fsetxattr
-+TRACE_SYSCALL_TABLE(sys_fsetxattr, sys_fsetxattr, 190, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getxattr
-+TRACE_SYSCALL_TABLE(sys_getxattr, sys_getxattr, 191, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_lgetxattr
-+TRACE_SYSCALL_TABLE(sys_lgetxattr, sys_lgetxattr, 192, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fgetxattr
-+TRACE_SYSCALL_TABLE(sys_fgetxattr, sys_fgetxattr, 193, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_listxattr
-+TRACE_SYSCALL_TABLE(sys_listxattr, sys_listxattr, 194, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_llistxattr
-+TRACE_SYSCALL_TABLE(sys_llistxattr, sys_llistxattr, 195, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_flistxattr
-+TRACE_SYSCALL_TABLE(sys_flistxattr, sys_flistxattr, 196, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_removexattr
-+TRACE_SYSCALL_TABLE(sys_removexattr, sys_removexattr, 197, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_lremovexattr
-+TRACE_SYSCALL_TABLE(sys_lremovexattr, sys_lremovexattr, 198, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fremovexattr
-+TRACE_SYSCALL_TABLE(sys_fremovexattr, sys_fremovexattr, 199, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_time
-+TRACE_SYSCALL_TABLE(sys_time, sys_time, 201, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_futex
-+TRACE_SYSCALL_TABLE(sys_futex, sys_futex, 202, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sched_setaffinity
-+TRACE_SYSCALL_TABLE(sys_sched_setaffinity, sys_sched_setaffinity, 203, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sched_getaffinity
-+TRACE_SYSCALL_TABLE(sys_sched_getaffinity, sys_sched_getaffinity, 204, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_io_setup
-+TRACE_SYSCALL_TABLE(sys_io_setup, sys_io_setup, 206, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_io_getevents
-+TRACE_SYSCALL_TABLE(sys_io_getevents, sys_io_getevents, 208, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_io_submit
-+TRACE_SYSCALL_TABLE(sys_io_submit, sys_io_submit, 209, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_io_cancel
-+TRACE_SYSCALL_TABLE(sys_io_cancel, sys_io_cancel, 210, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_getdents64
-+TRACE_SYSCALL_TABLE(sys_getdents64, sys_getdents64, 217, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_set_tid_address
-+TRACE_SYSCALL_TABLE(sys_set_tid_address, sys_set_tid_address, 218, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_semtimedop
-+TRACE_SYSCALL_TABLE(sys_semtimedop, sys_semtimedop, 220, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_timer_create
-+TRACE_SYSCALL_TABLE(sys_timer_create, sys_timer_create, 222, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_timer_settime
-+TRACE_SYSCALL_TABLE(sys_timer_settime, sys_timer_settime, 223, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_timer_gettime
-+TRACE_SYSCALL_TABLE(sys_timer_gettime, sys_timer_gettime, 224, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_clock_settime
-+TRACE_SYSCALL_TABLE(sys_clock_settime, sys_clock_settime, 227, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_clock_gettime
-+TRACE_SYSCALL_TABLE(sys_clock_gettime, sys_clock_gettime, 228, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_clock_getres
-+TRACE_SYSCALL_TABLE(sys_clock_getres, sys_clock_getres, 229, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_clock_nanosleep
-+TRACE_SYSCALL_TABLE(sys_clock_nanosleep, sys_clock_nanosleep, 230, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_epoll_wait
-+TRACE_SYSCALL_TABLE(sys_epoll_wait, sys_epoll_wait, 232, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_epoll_ctl
-+TRACE_SYSCALL_TABLE(sys_epoll_ctl, sys_epoll_ctl, 233, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_utimes
-+TRACE_SYSCALL_TABLE(sys_utimes, sys_utimes, 235, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mq_open
-+TRACE_SYSCALL_TABLE(sys_mq_open, sys_mq_open, 240, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mq_unlink
-+TRACE_SYSCALL_TABLE(sys_mq_unlink, sys_mq_unlink, 241, 1)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mq_timedsend
-+TRACE_SYSCALL_TABLE(sys_mq_timedsend, sys_mq_timedsend, 242, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mq_timedreceive
-+TRACE_SYSCALL_TABLE(sys_mq_timedreceive, sys_mq_timedreceive, 243, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mq_notify
-+TRACE_SYSCALL_TABLE(sys_mq_notify, sys_mq_notify, 244, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mq_getsetattr
-+TRACE_SYSCALL_TABLE(sys_mq_getsetattr, sys_mq_getsetattr, 245, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_kexec_load
-+TRACE_SYSCALL_TABLE(sys_kexec_load, sys_kexec_load, 246, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_waitid
-+TRACE_SYSCALL_TABLE(sys_waitid, sys_waitid, 247, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_inotify_add_watch
-+TRACE_SYSCALL_TABLE(sys_inotify_add_watch, sys_inotify_add_watch, 254, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_openat
-+TRACE_SYSCALL_TABLE(sys_openat, sys_openat, 257, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mkdirat
-+TRACE_SYSCALL_TABLE(sys_mkdirat, sys_mkdirat, 258, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_mknodat
-+TRACE_SYSCALL_TABLE(sys_mknodat, sys_mknodat, 259, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fchownat
-+TRACE_SYSCALL_TABLE(sys_fchownat, sys_fchownat, 260, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_futimesat
-+TRACE_SYSCALL_TABLE(sys_futimesat, sys_futimesat, 261, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_newfstatat
-+TRACE_SYSCALL_TABLE(sys_newfstatat, sys_newfstatat, 262, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_unlinkat
-+TRACE_SYSCALL_TABLE(sys_unlinkat, sys_unlinkat, 263, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_renameat
-+TRACE_SYSCALL_TABLE(sys_renameat, sys_renameat, 264, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_linkat
-+TRACE_SYSCALL_TABLE(sys_linkat, sys_linkat, 265, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_symlinkat
-+TRACE_SYSCALL_TABLE(sys_symlinkat, sys_symlinkat, 266, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_readlinkat
-+TRACE_SYSCALL_TABLE(sys_readlinkat, sys_readlinkat, 267, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_fchmodat
-+TRACE_SYSCALL_TABLE(sys_fchmodat, sys_fchmodat, 268, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_faccessat
-+TRACE_SYSCALL_TABLE(sys_faccessat, sys_faccessat, 269, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_pselect6
-+TRACE_SYSCALL_TABLE(sys_pselect6, sys_pselect6, 270, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_ppoll
-+TRACE_SYSCALL_TABLE(sys_ppoll, sys_ppoll, 271, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_set_robust_list
-+TRACE_SYSCALL_TABLE(sys_set_robust_list, sys_set_robust_list, 273, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_get_robust_list
-+TRACE_SYSCALL_TABLE(sys_get_robust_list, sys_get_robust_list, 274, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_splice
-+TRACE_SYSCALL_TABLE(sys_splice, sys_splice, 275, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_vmsplice
-+TRACE_SYSCALL_TABLE(sys_vmsplice, sys_vmsplice, 278, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_utimensat
-+TRACE_SYSCALL_TABLE(sys_utimensat, sys_utimensat, 280, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_epoll_pwait
-+TRACE_SYSCALL_TABLE(sys_epoll_pwait, sys_epoll_pwait, 281, 6)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_signalfd
-+TRACE_SYSCALL_TABLE(sys_signalfd, sys_signalfd, 282, 3)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_timerfd_settime
-+TRACE_SYSCALL_TABLE(sys_timerfd_settime, sys_timerfd_settime, 286, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_timerfd_gettime
-+TRACE_SYSCALL_TABLE(sys_timerfd_gettime, sys_timerfd_gettime, 287, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_accept4
-+TRACE_SYSCALL_TABLE(sys_accept4, sys_accept4, 288, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_signalfd4
-+TRACE_SYSCALL_TABLE(sys_signalfd4, sys_signalfd4, 289, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_pipe2
-+TRACE_SYSCALL_TABLE(sys_pipe2, sys_pipe2, 293, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_preadv
-+TRACE_SYSCALL_TABLE(sys_preadv, sys_preadv, 295, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_pwritev
-+TRACE_SYSCALL_TABLE(sys_pwritev, sys_pwritev, 296, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_rt_tgsigqueueinfo
-+TRACE_SYSCALL_TABLE(sys_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, 297, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_perf_event_open
-+TRACE_SYSCALL_TABLE(sys_perf_event_open, sys_perf_event_open, 298, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_recvmmsg
-+TRACE_SYSCALL_TABLE(sys_recvmmsg, sys_recvmmsg, 299, 5)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_prlimit64
-+TRACE_SYSCALL_TABLE(sys_prlimit64, sys_prlimit64, 302, 4)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_clock_adjtime
-+TRACE_SYSCALL_TABLE(sys_clock_adjtime, sys_clock_adjtime, 305, 2)
-+#endif
-+#ifndef OVERRIDE_TABLE_64_sys_sendmmsg
-+TRACE_SYSCALL_TABLE(sys_sendmmsg, sys_sendmmsg, 307, 4)
-+#endif
-+
-+#endif /* CREATE_SYSCALL_TABLE */
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h
-new file mode 100644
-index 0000000..0cdb32a
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h
-@@ -0,0 +1,5 @@
-+#ifndef CREATE_SYSCALL_TABLE
-+
-+#else /* CREATE_SYSCALL_TABLE */
-+
-+#endif /* CREATE_SYSCALL_TABLE */
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/Makefile b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/Makefile
-new file mode 100644
-index 0000000..4beb88c
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/Makefile
-@@ -0,0 +1 @@
-+obj-m += lttng-syscalls-extractor.o
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c
-new file mode 100644
-index 0000000..06c0da1
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c
-@@ -0,0 +1,85 @@
-+/*
-+ * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
-+ *
-+ * Dump syscall metadata to console.
-+ *
-+ * GPLv2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <linux/list.h>
-+#include <linux/err.h>
-+#include <linux/slab.h>
-+#include <linux/kallsyms.h>
-+#include <linux/dcache.h>
-+#include <linux/ftrace_event.h>
-+#include <trace/syscall.h>
-+
-+#ifndef CONFIG_FTRACE_SYSCALLS
-+#error "You need to set CONFIG_FTRACE_SYSCALLS=y"
-+#endif
-+
-+#ifndef CONFIG_KALLSYMS_ALL
-+#error "You need to set CONFIG_KALLSYMS_ALL=y"
-+#endif
-+
-+static struct syscall_metadata **__start_syscalls_metadata;
-+static struct syscall_metadata **__stop_syscalls_metadata;
-+
-+static __init
-+struct syscall_metadata *find_syscall_meta(unsigned long syscall)
-+{
-+ struct syscall_metadata **iter;
-+
-+ for (iter = __start_syscalls_metadata;
-+ iter < __stop_syscalls_metadata; iter++) {
-+ if ((*iter)->syscall_nr == syscall)
-+ return (*iter);
-+ }
-+ return NULL;
-+}
-+
-+int init_module(void)
-+{
-+ struct syscall_metadata *meta;
-+ int i;
-+
-+ __start_syscalls_metadata = (void *) kallsyms_lookup_name("__start_syscalls_metadata");
-+ __stop_syscalls_metadata = (void *) kallsyms_lookup_name("__stop_syscalls_metadata");
-+
-+ for (i = 0; i < NR_syscalls; i++) {
-+ int j;
-+
-+ meta = find_syscall_meta(i);
-+ if (!meta)
-+ continue;
-+ printk("syscall %s nr %d nbargs %d ",
-+ meta->name, meta->syscall_nr, meta->nb_args);
-+ printk("types: (");
-+ for (j = 0; j < meta->nb_args; j++) {
-+ if (j > 0)
-+ printk(", ");
-+ printk("%s", meta->types[j]);
-+ }
-+ printk(") ");
-+ printk("args: (");
-+ for (j = 0; j < meta->nb_args; j++) {
-+ if (j > 0)
-+ printk(", ");
-+ printk("%s", meta->args[j]);
-+ }
-+ printk(")\n");
-+ }
-+ printk("SUCCESS\n");
-+
-+ return -1;
-+}
-+
-+void cleanup_module(void)
-+{
-+}
-+
-+MODULE_LICENSE("GPL");
-diff --git a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh
-new file mode 100644
-index 0000000..5eddb27
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh
-@@ -0,0 +1,275 @@
-+#!/bin/sh
-+
-+# Generate system call probe description macros from syscall metadata dump file.
-+# example usage:
-+#
-+# lttng-syscalls-generate-headers.sh integers 3.0.4 x86-64-syscalls-3.0.4 64
-+# lttng-syscalls-generate-headers.sh pointers 3.0.4 x86-64-syscalls-3.0.4 64
-+
-+CLASS=$1
-+INPUTDIR=$2
-+INPUTFILE=$3
-+BITNESS=$4
-+INPUT=${INPUTDIR}/${INPUTFILE}
-+SRCFILE=gen.tmp.0
-+TMPFILE=gen.tmp.1
-+HEADER=headers/${INPUTFILE}_${CLASS}.h
-+
-+cp ${INPUT} ${SRCFILE}
-+
-+#Cleanup
-+perl -p -e 's/^\[.*\] //g' ${SRCFILE} > ${TMPFILE}
-+mv ${TMPFILE} ${SRCFILE}
-+
-+perl -p -e 's/^syscall sys_([^ ]*)/syscall $1/g' ${SRCFILE} > ${TMPFILE}
-+mv ${TMPFILE} ${SRCFILE}
-+
-+#Filter
-+
-+if [ "$CLASS" = integers ]; then
-+ #select integers and no-args.
-+ CLASSCAP=INTEGERS
-+ grep -v "\\*\|cap_user_header_t" ${SRCFILE} > ${TMPFILE}
-+ mv ${TMPFILE} ${SRCFILE}
-+fi
-+
-+
-+if [ "$CLASS" = pointers ]; then
-+ #select system calls using pointers.
-+ CLASSCAP=POINTERS
-+ grep "\\*\|cap_user_header_t" ${SRCFILE} > ${TMPFILE}
-+ mv ${TMPFILE} ${SRCFILE}
-+fi
-+
-+echo "/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */" > ${HEADER}
-+
-+echo \
-+"#ifndef CREATE_SYSCALL_TABLE
-+
-+#if !defined(_TRACE_SYSCALLS_${CLASSCAP}_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_SYSCALLS_${CLASSCAP}_H
-+
-+#include <linux/tracepoint.h>
-+#include <linux/syscalls.h>
-+#include \"${INPUTFILE}_${CLASS}_override.h\"
-+#include \"syscalls_${CLASS}_override.h\"
-+" >> ${HEADER}
-+
-+if [ "$CLASS" = integers ]; then
-+
-+NRARGS=0
-+
-+echo \
-+'SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,\n'\
-+' TP_STRUCT__entry(),\n'\
-+' TP_fast_assign(),\n'\
-+' TP_printk()\n'\
-+')'\
-+ >> ${HEADER}
-+
-+grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
-+perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
-+'types: \(([^)]*)\) '\
-+'args: \(([^)]*)\)/'\
-+'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
-+'SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_$1)\n'\
-+'#endif/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+fi
-+
-+
-+# types: 4
-+# args 5
-+
-+NRARGS=1
-+grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
-+perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
-+'types: \(([^)]*)\) '\
-+'args: \(([^)]*)\)/'\
-+'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
-+'SC_TRACE_EVENT(sys_$1,\n'\
-+' TP_PROTO($4 $5),\n'\
-+' TP_ARGS($5),\n'\
-+' TP_STRUCT__entry(__field($4, $5)),\n'\
-+' TP_fast_assign(tp_assign($4, $5, $5)),\n'\
-+' TP_printk()\n'\
-+')\n'\
-+'#endif/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+# types: 4 5
-+# args 6 7
-+
-+NRARGS=2
-+grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
-+perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
-+'types: \(([^,]*), ([^)]*)\) '\
-+'args: \(([^,]*), ([^)]*)\)/'\
-+'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
-+'SC_TRACE_EVENT(sys_$1,\n'\
-+' TP_PROTO($4 $6, $5 $7),\n'\
-+' TP_ARGS($6, $7),\n'\
-+' TP_STRUCT__entry(__field($4, $6) __field($5, $7)),\n'\
-+' TP_fast_assign(tp_assign($4, $6, $6) tp_assign($5, $7, $7)),\n'\
-+' TP_printk()\n'\
-+')\n'\
-+'#endif/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+# types: 4 5 6
-+# args 7 8 9
-+
-+NRARGS=3
-+grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
-+perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
-+'types: \(([^,]*), ([^,]*), ([^)]*)\) '\
-+'args: \(([^,]*), ([^,]*), ([^)]*)\)/'\
-+'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
-+'SC_TRACE_EVENT(sys_$1,\n'\
-+' TP_PROTO($4 $7, $5 $8, $6 $9),\n'\
-+' TP_ARGS($7, $8, $9),\n'\
-+' TP_STRUCT__entry(__field($4, $7) __field($5, $8) __field($6, $9)),\n'\
-+' TP_fast_assign(tp_assign($4, $7, $7) tp_assign($5, $8, $8) tp_assign($6, $9, $9)),\n'\
-+' TP_printk()\n'\
-+')\n'\
-+'#endif/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+
-+# types: 4 5 6 7
-+# args 8 9 10 11
-+
-+NRARGS=4
-+grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
-+perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
-+'types: \(([^,]*), ([^,]*), ([^,]*), ([^)]*)\) '\
-+'args: \(([^,]*), ([^,]*), ([^,]*), ([^)]*)\)/'\
-+'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
-+'SC_TRACE_EVENT(sys_$1,\n'\
-+' TP_PROTO($4 $8, $5 $9, $6 $10, $7 $11),\n'\
-+' TP_ARGS($8, $9, $10, $11),\n'\
-+' TP_STRUCT__entry(__field($4, $8) __field($5, $9) __field($6, $10) __field($7, $11)),\n'\
-+' TP_fast_assign(tp_assign($4, $8, $8) tp_assign($5, $9, $9) tp_assign($6, $10, $10) tp_assign($7, $11, $11)),\n'\
-+' TP_printk()\n'\
-+')\n'\
-+'#endif/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+# types: 4 5 6 7 8
-+# args 9 10 11 12 13
-+
-+NRARGS=5
-+grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
-+perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
-+'types: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^)]*)\) '\
-+'args: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^)]*)\)/'\
-+'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
-+'SC_TRACE_EVENT(sys_$1,\n'\
-+' TP_PROTO($4 $9, $5 $10, $6 $11, $7 $12, $8 $13),\n'\
-+' TP_ARGS($9, $10, $11, $12, $13),\n'\
-+' TP_STRUCT__entry(__field($4, $9) __field($5, $10) __field($6, $11) __field($7, $12) __field($8, $13)),\n'\
-+' TP_fast_assign(tp_assign($4, $9, $9) tp_assign($5, $10, $10) tp_assign($6, $11, $11) tp_assign($7, $12, $12) tp_assign($8, $13, $13)),\n'\
-+' TP_printk()\n'\
-+')\n'\
-+'#endif/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+
-+# types: 4 5 6 7 8 9
-+# args 10 11 12 13 14 15
-+
-+NRARGS=6
-+grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
-+perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
-+'types: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^\)]*)\) '\
-+'args: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^\)]*)\)/'\
-+'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
-+'SC_TRACE_EVENT(sys_$1,\n'\
-+' TP_PROTO($4 $10, $5 $11, $6 $12, $7 $13, $8 $14, $9 $15),\n'\
-+' TP_ARGS($10, $11, $12, $13, $14, $15),\n'\
-+' TP_STRUCT__entry(__field($4, $10) __field($5, $11) __field($6, $12) __field($7, $13) __field($8, $14) __field($9, $15)),\n'\
-+' TP_fast_assign(tp_assign($4, $10, $10) tp_assign($5, $11, $11) tp_assign($6, $12, $12) tp_assign($7, $13, $13) tp_assign($8, $14, $14) tp_assign($9, $15, $15)),\n'\
-+' TP_printk()\n'\
-+')\n'\
-+'#endif/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+# Macro for tracing syscall table
-+
-+rm -f ${TMPFILE}
-+for NRARGS in $(seq 0 6); do
-+ grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} >> ${TMPFILE}
-+done
-+
-+echo \
-+"
-+#endif /* _TRACE_SYSCALLS_${CLASSCAP}_H */
-+
-+/* This part must be outside protection */
-+#include \"../../../probes/define_trace.h\"
-+
-+#else /* CREATE_SYSCALL_TABLE */
-+
-+#include \"${INPUTFILE}_${CLASS}_override.h\"
-+#include \"syscalls_${CLASS}_override.h\"
-+" >> ${HEADER}
-+
-+NRARGS=0
-+
-+if [ "$CLASS" = integers ]; then
-+#noargs
-+grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
-+perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) .*$/'\
-+'#ifndef OVERRIDE_TABLE_'"${BITNESS}"'_sys_$1\n'\
-+'TRACE_SYSCALL_TABLE\(syscalls_noargs, sys_$1, $2, $3\)\n'\
-+'#endif/g'\
-+ ${TMPFILE} >> ${HEADER}
-+fi
-+
-+#others.
-+grep -v "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
-+perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) .*$/'\
-+'#ifndef OVERRIDE_TABLE_'"${BITNESS}"'_sys_$1\n'\
-+'TRACE_SYSCALL_TABLE(sys_$1, sys_$1, $2, $3)\n'\
-+'#endif/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+echo -n \
-+"
-+#endif /* CREATE_SYSCALL_TABLE */
-+" >> ${HEADER}
-+
-+#field names: ...char * type with *name* or *file* or *path* or *root*
-+# or *put_old* or *type*
-+cp -f ${HEADER} ${TMPFILE}
-+rm -f ${HEADER}
-+perl -p -e 's/__field\(([^,)]*char \*), ([^\)]*)(name|file|path|root|put_old|type)([^\)]*)\)/__string_from_user($2$3$4, $2$3$4)/g'\
-+ ${TMPFILE} >> ${HEADER}
-+cp -f ${HEADER} ${TMPFILE}
-+rm -f ${HEADER}
-+perl -p -e 's/tp_assign\(([^,)]*char \*), ([^,]*)(name|file|path|root|put_old|type)([^,]*), ([^\)]*)\)/tp_copy_string_from_user($2$3$4, $5)/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+#prettify addresses heuristics.
-+#field names with addr or ptr
-+cp -f ${HEADER} ${TMPFILE}
-+rm -f ${HEADER}
-+perl -p -e 's/__field\(([^,)]*), ([^,)]*addr|[^,)]*ptr)([^),]*)\)/__field_hex($1, $2$3)/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+#field types ending with '*'
-+cp -f ${HEADER} ${TMPFILE}
-+rm -f ${HEADER}
-+perl -p -e 's/__field\(([^,)]*\*), ([^),]*)\)/__field_hex($1, $2)/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+#strip the extra type information from tp_assign.
-+cp -f ${HEADER} ${TMPFILE}
-+rm -f ${HEADER}
-+perl -p -e 's/tp_assign\(([^,)]*), ([^,]*), ([^\)]*)\)/tp_assign($2, $3)/g'\
-+ ${TMPFILE} >> ${HEADER}
-+
-+rm -f ${INPUTFILE}.tmp
-+rm -f ${TMPFILE}
-+rm -f ${SRCFILE}
---
-1.7.9
-
diff --git a/patches.lttng/0009-lttng-lib-ring-buffer-clients.patch b/patches.lttng/0009-lttng-lib-ring-buffer-clients.patch
deleted file mode 100644
index 39826468e49..00000000000
--- a/patches.lttng/0009-lttng-lib-ring-buffer-clients.patch
+++ /dev/null
@@ -1,1106 +0,0 @@
-From 6857797120e99facc465a972026038199e4c2356 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:17 -0500
-Subject: lttng: lib ring buffer clients
-
-Each lttng buffer configuration (discard mode, overwrite mode, mmap
-support, splice support, per-cpu buffers, global buffer for metadata) is
-a lib ring buffer client.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- .../staging/lttng/ltt-ring-buffer-client-discard.c | 21 +
- .../lttng/ltt-ring-buffer-client-mmap-discard.c | 21 +
- .../lttng/ltt-ring-buffer-client-mmap-overwrite.c | 21 +
- .../lttng/ltt-ring-buffer-client-overwrite.c | 21 +
- drivers/staging/lttng/ltt-ring-buffer-client.h | 569 ++++++++++++++++++++
- .../lttng/ltt-ring-buffer-metadata-client.c | 21 +
- .../lttng/ltt-ring-buffer-metadata-client.h | 330 ++++++++++++
- .../lttng/ltt-ring-buffer-metadata-mmap-client.c | 21 +
- 8 files changed, 1025 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/ltt-ring-buffer-client-discard.c
- create mode 100644 drivers/staging/lttng/ltt-ring-buffer-client-mmap-discard.c
- create mode 100644 drivers/staging/lttng/ltt-ring-buffer-client-mmap-overwrite.c
- create mode 100644 drivers/staging/lttng/ltt-ring-buffer-client-overwrite.c
- create mode 100644 drivers/staging/lttng/ltt-ring-buffer-client.h
- create mode 100644 drivers/staging/lttng/ltt-ring-buffer-metadata-client.c
- create mode 100644 drivers/staging/lttng/ltt-ring-buffer-metadata-client.h
- create mode 100644 drivers/staging/lttng/ltt-ring-buffer-metadata-mmap-client.c
-
-diff --git a/drivers/staging/lttng/ltt-ring-buffer-client-discard.c b/drivers/staging/lttng/ltt-ring-buffer-client-discard.c
-new file mode 100644
-index 0000000..eafcf45
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-ring-buffer-client-discard.c
-@@ -0,0 +1,21 @@
-+/*
-+ * ltt-ring-buffer-client-discard.c
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng lib ring buffer client (discard mode).
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include "ltt-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "discard"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
-+#include "ltt-ring-buffer-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");
-diff --git a/drivers/staging/lttng/ltt-ring-buffer-client-mmap-discard.c b/drivers/staging/lttng/ltt-ring-buffer-client-mmap-discard.c
-new file mode 100644
-index 0000000..29819a7
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-ring-buffer-client-mmap-discard.c
-@@ -0,0 +1,21 @@
-+/*
-+ * ltt-ring-buffer-client-mmap-discard.c
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng lib ring buffer client (discard mode).
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include "ltt-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "discard-mmap"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
-+#include "ltt-ring-buffer-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");
-diff --git a/drivers/staging/lttng/ltt-ring-buffer-client-mmap-overwrite.c b/drivers/staging/lttng/ltt-ring-buffer-client-mmap-overwrite.c
-new file mode 100644
-index 0000000..741aa7b
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-ring-buffer-client-mmap-overwrite.c
-@@ -0,0 +1,21 @@
-+/*
-+ * ltt-ring-buffer-client-mmap-overwrite.c
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng lib ring buffer client (overwrite mode).
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include "ltt-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite-mmap"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
-+#include "ltt-ring-buffer-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");
-diff --git a/drivers/staging/lttng/ltt-ring-buffer-client-overwrite.c b/drivers/staging/lttng/ltt-ring-buffer-client-overwrite.c
-new file mode 100644
-index 0000000..9811941
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-ring-buffer-client-overwrite.c
-@@ -0,0 +1,21 @@
-+/*
-+ * ltt-ring-buffer-client-overwrite.c
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng lib ring buffer client (overwrite mode).
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include "ltt-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
-+#include "ltt-ring-buffer-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");
-diff --git a/drivers/staging/lttng/ltt-ring-buffer-client.h b/drivers/staging/lttng/ltt-ring-buffer-client.h
-new file mode 100644
-index 0000000..8df3790
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-ring-buffer-client.h
-@@ -0,0 +1,569 @@
-+/*
-+ * ltt-ring-buffer-client.h
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng lib ring buffer client template.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/types.h>
-+#include "lib/bitfield.h"
-+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "wrapper/trace-clock.h"
-+#include "ltt-events.h"
-+#include "ltt-tracer.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+
-+/*
-+ * Keep the natural field alignment for _each field_ within this structure if
-+ * you ever add/remove a field from this header. Packed attribute is not used
-+ * because gcc generates poor code on at least powerpc and mips. Don't ever
-+ * let gcc add padding between the structure elements.
-+ *
-+ * The guarantee we have with timestamps is that all the events in a
-+ * packet are included (inclusive) within the begin/end timestamps of
-+ * the packet. Another guarantee we have is that the "timestamp begin",
-+ * as well as the event timestamps, are monotonically increasing (never
-+ * decrease) when moving forward in a stream (physically). But this
-+ * guarantee does not apply to "timestamp end", because it is sampled at
-+ * commit time, which is not ordered with respect to space reservation.
-+ */
-+
-+struct packet_header {
-+ /* Trace packet header */
-+ uint32_t magic; /*
-+ * Trace magic number.
-+ * contains endianness information.
-+ */
-+ uint8_t uuid[16];
-+ uint32_t stream_id;
-+
-+ struct {
-+ /* Stream packet context */
-+ uint64_t timestamp_begin; /* Cycle count at subbuffer start */
-+ uint64_t timestamp_end; /* Cycle count at subbuffer end */
-+ uint32_t events_discarded; /*
-+ * Events lost in this subbuffer since
-+ * the beginning of the trace.
-+ * (may overflow)
-+ */
-+ uint32_t content_size; /* Size of data in subbuffer */
-+ uint32_t packet_size; /* Subbuffer size (include padding) */
-+ uint32_t cpu_id; /* CPU id associated with stream */
-+ uint8_t header_end; /* End of header */
-+ } ctx;
-+};
-+
-+
-+static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
-+{
-+ return trace_clock_read64();
-+}
-+
-+static inline
-+size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
-+{
-+ int i;
-+ size_t orig_offset = offset;
-+
-+ if (likely(!ctx))
-+ return 0;
-+ for (i = 0; i < ctx->nr_fields; i++)
-+ offset += ctx->fields[i].get_size(offset);
-+ return offset - orig_offset;
-+}
-+
-+static inline
-+void ctx_record(struct lib_ring_buffer_ctx *bufctx,
-+ struct ltt_channel *chan,
-+ struct lttng_ctx *ctx)
-+{
-+ int i;
-+
-+ if (likely(!ctx))
-+ return;
-+ for (i = 0; i < ctx->nr_fields; i++)
-+ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
-+}
-+
-+/*
-+ * record_header_size - Calculate the header size and padding necessary.
-+ * @config: ring buffer instance configuration
-+ * @chan: channel
-+ * @offset: offset in the write buffer
-+ * @pre_header_padding: padding to add before the header (output)
-+ * @ctx: reservation context
-+ *
-+ * Returns the event header size (including padding).
-+ *
-+ * The payload must itself determine its own alignment from the biggest type it
-+ * contains.
-+ */
-+static __inline__
-+unsigned char record_header_size(const struct lib_ring_buffer_config *config,
-+ struct channel *chan, size_t offset,
-+ size_t *pre_header_padding,
-+ struct lib_ring_buffer_ctx *ctx)
-+{
-+ struct ltt_channel *ltt_chan = channel_get_private(chan);
-+ struct ltt_event *event = ctx->priv;
-+ size_t orig_offset = offset;
-+ size_t padding;
-+
-+ switch (ltt_chan->header_type) {
-+ case 1: /* compact */
-+ padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
-+ offset += padding;
-+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
-+ offset += sizeof(uint32_t); /* id and timestamp */
-+ } else {
-+ /* Minimum space taken by 5-bit id */
-+ offset += sizeof(uint8_t);
-+ /* Align extended struct on largest member */
-+ offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
-+ offset += sizeof(uint32_t); /* id */
-+ offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
-+ offset += sizeof(uint64_t); /* timestamp */
-+ }
-+ break;
-+ case 2: /* large */
-+ padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
-+ offset += padding;
-+ offset += sizeof(uint16_t);
-+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
-+ offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
-+ offset += sizeof(uint32_t); /* timestamp */
-+ } else {
-+ /* Align extended struct on largest member */
-+ offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
-+ offset += sizeof(uint32_t); /* id */
-+ offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
-+ offset += sizeof(uint64_t); /* timestamp */
-+ }
-+ break;
-+ default:
-+ padding = 0;
-+ WARN_ON_ONCE(1);
-+ }
-+ offset += ctx_get_size(offset, event->ctx);
-+ offset += ctx_get_size(offset, ltt_chan->ctx);
-+
-+ *pre_header_padding = padding;
-+ return offset - orig_offset;
-+}
-+
-+#include "wrapper/ringbuffer/api.h"
-+
-+static
-+void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx,
-+ uint32_t event_id);
-+
-+/*
-+ * ltt_write_event_header
-+ *
-+ * Writes the event header to the offset (already aligned on 32-bits).
-+ *
-+ * @config: ring buffer instance configuration
-+ * @ctx: reservation context
-+ * @event_id: event ID
-+ */
-+static __inline__
-+void ltt_write_event_header(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx,
-+ uint32_t event_id)
-+{
-+ struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
-+ struct ltt_event *event = ctx->priv;
-+
-+ if (unlikely(ctx->rflags))
-+ goto slow_path;
-+
-+ switch (ltt_chan->header_type) {
-+ case 1: /* compact */
-+ {
-+ uint32_t id_time = 0;
-+
-+ bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
-+ bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
-+ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
-+ break;
-+ }
-+ case 2: /* large */
-+ {
-+ uint32_t timestamp = (uint32_t) ctx->tsc;
-+ uint16_t id = event_id;
-+
-+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
-+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-+ break;
-+ }
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+
-+ ctx_record(ctx, ltt_chan, ltt_chan->ctx);
-+ ctx_record(ctx, ltt_chan, event->ctx);
-+ lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
-+
-+ return;
-+
-+slow_path:
-+ ltt_write_event_header_slow(config, ctx, event_id);
-+}
-+
-+static
-+void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx,
-+ uint32_t event_id)
-+{
-+ struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
-+ struct ltt_event *event = ctx->priv;
-+
-+ switch (ltt_chan->header_type) {
-+ case 1: /* compact */
-+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
-+ uint32_t id_time = 0;
-+
-+ bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
-+ bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
-+ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
-+ } else {
-+ uint8_t id = 0;
-+ uint64_t timestamp = ctx->tsc;
-+
-+ bt_bitfield_write(&id, uint8_t, 0, 5, 31);
-+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-+ /* Align extended struct on largest member */
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
-+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
-+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-+ }
-+ break;
-+ case 2: /* large */
-+ {
-+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
-+ uint32_t timestamp = (uint32_t) ctx->tsc;
-+ uint16_t id = event_id;
-+
-+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
-+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-+ } else {
-+ uint16_t id = 65535;
-+ uint64_t timestamp = ctx->tsc;
-+
-+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-+ /* Align extended struct on largest member */
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
-+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
-+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-+ }
-+ break;
-+ }
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+ ctx_record(ctx, ltt_chan, ltt_chan->ctx);
-+ ctx_record(ctx, ltt_chan, event->ctx);
-+ lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
-+}
-+
-+static const struct lib_ring_buffer_config client_config;
-+
-+static u64 client_ring_buffer_clock_read(struct channel *chan)
-+{
-+ return lib_ring_buffer_clock_read(chan);
-+}
-+
-+static
-+size_t client_record_header_size(const struct lib_ring_buffer_config *config,
-+ struct channel *chan, size_t offset,
-+ size_t *pre_header_padding,
-+ struct lib_ring_buffer_ctx *ctx)
-+{
-+ return record_header_size(config, chan, offset,
-+ pre_header_padding, ctx);
-+}
-+
-+/**
-+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
-+ *
-+ * Return header size without padding after the structure. Don't use packed
-+ * structure because gcc generates inefficient code on some architectures
-+ * (powerpc, mips..)
-+ */
-+static size_t client_packet_header_size(void)
-+{
-+ return offsetof(struct packet_header, ctx.header_end);
-+}
-+
-+static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
-+ unsigned int subbuf_idx)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ struct packet_header *header =
-+ (struct packet_header *)
-+ lib_ring_buffer_offset_address(&buf->backend,
-+ subbuf_idx * chan->backend.subbuf_size);
-+ struct ltt_channel *ltt_chan = channel_get_private(chan);
-+ struct ltt_session *session = ltt_chan->session;
-+
-+ header->magic = CTF_MAGIC_NUMBER;
-+ memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
-+ header->stream_id = ltt_chan->id;
-+ header->ctx.timestamp_begin = tsc;
-+ header->ctx.timestamp_end = 0;
-+ header->ctx.events_discarded = 0;
-+ header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
-+ header->ctx.packet_size = 0xFFFFFFFF;
-+ header->ctx.cpu_id = buf->backend.cpu;
-+}
-+
-+/*
-+ * offset is assumed to never be 0 here: never deliver a completely empty
-+ * subbuffer. data_size is between 1 and subbuf_size.
-+ */
-+static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
-+ unsigned int subbuf_idx, unsigned long data_size)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ struct packet_header *header =
-+ (struct packet_header *)
-+ lib_ring_buffer_offset_address(&buf->backend,
-+ subbuf_idx * chan->backend.subbuf_size);
-+ unsigned long records_lost = 0;
-+
-+ header->ctx.timestamp_end = tsc;
-+ header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
-+ header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
-+ records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
-+ records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
-+ records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
-+ header->ctx.events_discarded = records_lost;
-+}
-+
-+static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
-+ int cpu, const char *name)
-+{
-+ return 0;
-+}
-+
-+static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
-+{
-+}
-+
-+static const struct lib_ring_buffer_config client_config = {
-+ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
-+ .cb.record_header_size = client_record_header_size,
-+ .cb.subbuffer_header_size = client_packet_header_size,
-+ .cb.buffer_begin = client_buffer_begin,
-+ .cb.buffer_end = client_buffer_end,
-+ .cb.buffer_create = client_buffer_create,
-+ .cb.buffer_finalize = client_buffer_finalize,
-+
-+ .tsc_bits = 32,
-+ .alloc = RING_BUFFER_ALLOC_PER_CPU,
-+ .sync = RING_BUFFER_SYNC_PER_CPU,
-+ .mode = RING_BUFFER_MODE_TEMPLATE,
-+ .backend = RING_BUFFER_PAGE,
-+ .output = RING_BUFFER_OUTPUT_TEMPLATE,
-+ .oops = RING_BUFFER_OOPS_CONSISTENCY,
-+ .ipi = RING_BUFFER_IPI_BARRIER,
-+ .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
-+};
-+
-+static
-+struct channel *_channel_create(const char *name,
-+ struct ltt_channel *ltt_chan, void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval)
-+{
-+ return channel_create(&client_config, name, ltt_chan, buf_addr,
-+ subbuf_size, num_subbuf, switch_timer_interval,
-+ read_timer_interval);
-+}
-+
-+static
-+void ltt_channel_destroy(struct channel *chan)
-+{
-+ channel_destroy(chan);
-+}
-+
-+static
-+struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
-+{
-+ struct lib_ring_buffer *buf;
-+ int cpu;
-+
-+ for_each_channel_cpu(cpu, chan) {
-+ buf = channel_get_ring_buffer(&client_config, chan, cpu);
-+ if (!lib_ring_buffer_open_read(buf))
-+ return buf;
-+ }
-+ return NULL;
-+}
-+
-+static
-+int ltt_buffer_has_read_closed_stream(struct channel *chan)
-+{
-+ struct lib_ring_buffer *buf;
-+ int cpu;
-+
-+ for_each_channel_cpu(cpu, chan) {
-+ buf = channel_get_ring_buffer(&client_config, chan, cpu);
-+ if (!atomic_long_read(&buf->active_readers))
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static
-+void ltt_buffer_read_close(struct lib_ring_buffer *buf)
-+{
-+ lib_ring_buffer_release_read(buf);
-+}
-+
-+static
-+int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
-+ uint32_t event_id)
-+{
-+ struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
-+ int ret, cpu;
-+
-+ cpu = lib_ring_buffer_get_cpu(&client_config);
-+ if (cpu < 0)
-+ return -EPERM;
-+ ctx->cpu = cpu;
-+
-+ switch (ltt_chan->header_type) {
-+ case 1: /* compact */
-+ if (event_id > 30)
-+ ctx->rflags |= LTT_RFLAG_EXTENDED;
-+ break;
-+ case 2: /* large */
-+ if (event_id > 65534)
-+ ctx->rflags |= LTT_RFLAG_EXTENDED;
-+ break;
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+
-+ ret = lib_ring_buffer_reserve(&client_config, ctx);
-+ if (ret)
-+ goto put;
-+ ltt_write_event_header(&client_config, ctx, event_id);
-+ return 0;
-+put:
-+ lib_ring_buffer_put_cpu(&client_config);
-+ return ret;
-+}
-+
-+static
-+void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
-+{
-+ lib_ring_buffer_commit(&client_config, ctx);
-+ lib_ring_buffer_put_cpu(&client_config);
-+}
-+
-+static
-+void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
-+ size_t len)
-+{
-+ lib_ring_buffer_write(&client_config, ctx, src, len);
-+}
-+
-+static
-+void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
-+ const void __user *src, size_t len)
-+{
-+ lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
-+}
-+
-+static
-+void ltt_event_memset(struct lib_ring_buffer_ctx *ctx,
-+ int c, size_t len)
-+{
-+ lib_ring_buffer_memset(&client_config, ctx, c, len);
-+}
-+
-+static
-+wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
-+{
-+ struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
-+ chan, cpu);
-+ return &buf->write_wait;
-+}
-+
-+static
-+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
-+{
-+ return &chan->hp_wait;
-+}
-+
-+static
-+int ltt_is_finalized(struct channel *chan)
-+{
-+ return lib_ring_buffer_channel_is_finalized(chan);
-+}
-+
-+static
-+int ltt_is_disabled(struct channel *chan)
-+{
-+ return lib_ring_buffer_channel_is_disabled(chan);
-+}
-+
-+static struct ltt_transport ltt_relay_transport = {
-+ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
-+ .owner = THIS_MODULE,
-+ .ops = {
-+ .channel_create = _channel_create,
-+ .channel_destroy = ltt_channel_destroy,
-+ .buffer_read_open = ltt_buffer_read_open,
-+ .buffer_has_read_closed_stream =
-+ ltt_buffer_has_read_closed_stream,
-+ .buffer_read_close = ltt_buffer_read_close,
-+ .event_reserve = ltt_event_reserve,
-+ .event_commit = ltt_event_commit,
-+ .event_write = ltt_event_write,
-+ .event_write_from_user = ltt_event_write_from_user,
-+ .event_memset = ltt_event_memset,
-+ .packet_avail_size = NULL, /* Would be racy anyway */
-+ .get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
-+ .get_hp_wait_queue = ltt_get_hp_wait_queue,
-+ .is_finalized = ltt_is_finalized,
-+ .is_disabled = ltt_is_disabled,
-+ },
-+};
-+
-+static int __init ltt_ring_buffer_client_init(void)
-+{
-+ /*
-+ * This vmalloc sync all also takes care of the lib ring buffer
-+ * vmalloc'd module pages when it is built as a module into LTTng.
-+ */
-+ wrapper_vmalloc_sync_all();
-+ ltt_transport_register(&ltt_relay_transport);
-+ return 0;
-+}
-+
-+module_init(ltt_ring_buffer_client_init);
-+
-+static void __exit ltt_ring_buffer_client_exit(void)
-+{
-+ ltt_transport_unregister(&ltt_relay_transport);
-+}
-+
-+module_exit(ltt_ring_buffer_client_exit);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
-+ " client");
-diff --git a/drivers/staging/lttng/ltt-ring-buffer-metadata-client.c b/drivers/staging/lttng/ltt-ring-buffer-metadata-client.c
-new file mode 100644
-index 0000000..ac6fe78
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-ring-buffer-metadata-client.c
-@@ -0,0 +1,21 @@
-+/*
-+ * ltt-ring-buffer-metadata-client.c
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng lib ring buffer metadata client.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include "ltt-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
-+#include "ltt-ring-buffer-metadata-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");
-diff --git a/drivers/staging/lttng/ltt-ring-buffer-metadata-client.h b/drivers/staging/lttng/ltt-ring-buffer-metadata-client.h
-new file mode 100644
-index 0000000..529bbb1
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-ring-buffer-metadata-client.h
-@@ -0,0 +1,330 @@
-+/*
-+ * ltt-ring-buffer-metadata-client.h
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng lib ring buffer client template.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/types.h>
-+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "ltt-events.h"
-+#include "ltt-tracer.h"
-+
-+struct metadata_packet_header {
-+ uint32_t magic; /* 0x75D11D57 */
-+ uint8_t uuid[16]; /* Unique Universal Identifier */
-+ uint32_t checksum; /* 0 if unused */
-+ uint32_t content_size; /* in bits */
-+ uint32_t packet_size; /* in bits */
-+ uint8_t compression_scheme; /* 0 if unused */
-+ uint8_t encryption_scheme; /* 0 if unused */
-+ uint8_t checksum_scheme; /* 0 if unused */
-+ uint8_t major; /* CTF spec major version number */
-+ uint8_t minor; /* CTF spec minor version number */
-+ uint8_t header_end[0];
-+};
-+
-+struct metadata_record_header {
-+ uint8_t header_end[0]; /* End of header */
-+};
-+
-+static const struct lib_ring_buffer_config client_config;
-+
-+static inline
-+u64 lib_ring_buffer_clock_read(struct channel *chan)
-+{
-+ return 0;
-+}
-+
-+static inline
-+unsigned char record_header_size(const struct lib_ring_buffer_config *config,
-+ struct channel *chan, size_t offset,
-+ size_t *pre_header_padding,
-+ struct lib_ring_buffer_ctx *ctx)
-+{
-+ return 0;
-+}
-+
-+#include "wrapper/ringbuffer/api.h"
-+
-+static u64 client_ring_buffer_clock_read(struct channel *chan)
-+{
-+ return 0;
-+}
-+
-+static
-+size_t client_record_header_size(const struct lib_ring_buffer_config *config,
-+ struct channel *chan, size_t offset,
-+ size_t *pre_header_padding,
-+ struct lib_ring_buffer_ctx *ctx)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
-+ *
-+ * Return header size without padding after the structure. Don't use packed
-+ * structure because gcc generates inefficient code on some architectures
-+ * (powerpc, mips..)
-+ */
-+static size_t client_packet_header_size(void)
-+{
-+ return offsetof(struct metadata_packet_header, header_end);
-+}
-+
-+static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
-+ unsigned int subbuf_idx)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ struct metadata_packet_header *header =
-+ (struct metadata_packet_header *)
-+ lib_ring_buffer_offset_address(&buf->backend,
-+ subbuf_idx * chan->backend.subbuf_size);
-+ struct ltt_channel *ltt_chan = channel_get_private(chan);
-+ struct ltt_session *session = ltt_chan->session;
-+
-+ header->magic = TSDL_MAGIC_NUMBER;
-+ memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
-+ header->checksum = 0; /* 0 if unused */
-+ header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
-+ header->packet_size = 0xFFFFFFFF; /* in bits, for debugging */
-+ header->compression_scheme = 0; /* 0 if unused */
-+ header->encryption_scheme = 0; /* 0 if unused */
-+ header->checksum_scheme = 0; /* 0 if unused */
-+ header->major = CTF_SPEC_MAJOR;
-+ header->minor = CTF_SPEC_MINOR;
-+}
-+
-+/*
-+ * offset is assumed to never be 0 here: never deliver a completely empty
-+ * subbuffer. data_size is between 1 and subbuf_size.
-+ */
-+static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
-+ unsigned int subbuf_idx, unsigned long data_size)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ struct metadata_packet_header *header =
-+ (struct metadata_packet_header *)
-+ lib_ring_buffer_offset_address(&buf->backend,
-+ subbuf_idx * chan->backend.subbuf_size);
-+ unsigned long records_lost = 0;
-+
-+ header->content_size = data_size * CHAR_BIT; /* in bits */
-+ header->packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
-+ /*
-+ * We do not care about the records lost count, because the metadata
-+ * channel waits and retry.
-+ */
-+ (void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
-+ records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
-+ records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
-+ WARN_ON_ONCE(records_lost != 0);
-+}
-+
-+static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
-+ int cpu, const char *name)
-+{
-+ return 0;
-+}
-+
-+static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
-+{
-+}
-+
-+static const struct lib_ring_buffer_config client_config = {
-+ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
-+ .cb.record_header_size = client_record_header_size,
-+ .cb.subbuffer_header_size = client_packet_header_size,
-+ .cb.buffer_begin = client_buffer_begin,
-+ .cb.buffer_end = client_buffer_end,
-+ .cb.buffer_create = client_buffer_create,
-+ .cb.buffer_finalize = client_buffer_finalize,
-+
-+ .tsc_bits = 0,
-+ .alloc = RING_BUFFER_ALLOC_GLOBAL,
-+ .sync = RING_BUFFER_SYNC_GLOBAL,
-+ .mode = RING_BUFFER_MODE_TEMPLATE,
-+ .backend = RING_BUFFER_PAGE,
-+ .output = RING_BUFFER_OUTPUT_TEMPLATE,
-+ .oops = RING_BUFFER_OOPS_CONSISTENCY,
-+ .ipi = RING_BUFFER_IPI_BARRIER,
-+ .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
-+};
-+
-+static
-+struct channel *_channel_create(const char *name,
-+ struct ltt_channel *ltt_chan, void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval)
-+{
-+ return channel_create(&client_config, name, ltt_chan, buf_addr,
-+ subbuf_size, num_subbuf, switch_timer_interval,
-+ read_timer_interval);
-+}
-+
-+static
-+void ltt_channel_destroy(struct channel *chan)
-+{
-+ channel_destroy(chan);
-+}
-+
-+static
-+struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
-+{
-+ struct lib_ring_buffer *buf;
-+
-+ buf = channel_get_ring_buffer(&client_config, chan, 0);
-+ if (!lib_ring_buffer_open_read(buf))
-+ return buf;
-+ return NULL;
-+}
-+
-+static
-+int ltt_buffer_has_read_closed_stream(struct channel *chan)
-+{
-+ struct lib_ring_buffer *buf;
-+ int cpu;
-+
-+ for_each_channel_cpu(cpu, chan) {
-+ buf = channel_get_ring_buffer(&client_config, chan, cpu);
-+ if (!atomic_long_read(&buf->active_readers))
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static
-+void ltt_buffer_read_close(struct lib_ring_buffer *buf)
-+{
-+ lib_ring_buffer_release_read(buf);
-+}
-+
-+static
-+int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
-+{
-+ return lib_ring_buffer_reserve(&client_config, ctx);
-+}
-+
-+static
-+void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
-+{
-+ lib_ring_buffer_commit(&client_config, ctx);
-+}
-+
-+static
-+void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
-+ size_t len)
-+{
-+ lib_ring_buffer_write(&client_config, ctx, src, len);
-+}
-+
-+static
-+void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
-+ const void __user *src, size_t len)
-+{
-+ lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
-+}
-+
-+static
-+void ltt_event_memset(struct lib_ring_buffer_ctx *ctx,
-+ int c, size_t len)
-+{
-+ lib_ring_buffer_memset(&client_config, ctx, c, len);
-+}
-+
-+static
-+size_t ltt_packet_avail_size(struct channel *chan)
-+
-+{
-+ unsigned long o_begin;
-+ struct lib_ring_buffer *buf;
-+
-+ buf = chan->backend.buf; /* Only for global buffer ! */
-+ o_begin = v_read(&client_config, &buf->offset);
-+ if (subbuf_offset(o_begin, chan) != 0) {
-+ return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
-+ } else {
-+ return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
-+ - sizeof(struct metadata_packet_header);
-+ }
-+}
-+
-+static
-+wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
-+{
-+ struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
-+ chan, cpu);
-+ return &buf->write_wait;
-+}
-+
-+static
-+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
-+{
-+ return &chan->hp_wait;
-+}
-+
-+static
-+int ltt_is_finalized(struct channel *chan)
-+{
-+ return lib_ring_buffer_channel_is_finalized(chan);
-+}
-+
-+static
-+int ltt_is_disabled(struct channel *chan)
-+{
-+ return lib_ring_buffer_channel_is_disabled(chan);
-+}
-+
-+static struct ltt_transport ltt_relay_transport = {
-+ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
-+ .owner = THIS_MODULE,
-+ .ops = {
-+ .channel_create = _channel_create,
-+ .channel_destroy = ltt_channel_destroy,
-+ .buffer_read_open = ltt_buffer_read_open,
-+ .buffer_has_read_closed_stream =
-+ ltt_buffer_has_read_closed_stream,
-+ .buffer_read_close = ltt_buffer_read_close,
-+ .event_reserve = ltt_event_reserve,
-+ .event_commit = ltt_event_commit,
-+ .event_write_from_user = ltt_event_write_from_user,
-+ .event_memset = ltt_event_memset,
-+ .event_write = ltt_event_write,
-+ .packet_avail_size = ltt_packet_avail_size,
-+ .get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
-+ .get_hp_wait_queue = ltt_get_hp_wait_queue,
-+ .is_finalized = ltt_is_finalized,
-+ .is_disabled = ltt_is_disabled,
-+ },
-+};
-+
-+static int __init ltt_ring_buffer_client_init(void)
-+{
-+ /*
-+ * This vmalloc sync all also takes care of the lib ring buffer
-+ * vmalloc'd module pages when it is built as a module into LTTng.
-+ */
-+ wrapper_vmalloc_sync_all();
-+ ltt_transport_register(&ltt_relay_transport);
-+ return 0;
-+}
-+
-+module_init(ltt_ring_buffer_client_init);
-+
-+static void __exit ltt_ring_buffer_client_exit(void)
-+{
-+ ltt_transport_unregister(&ltt_relay_transport);
-+}
-+
-+module_exit(ltt_ring_buffer_client_exit);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
-+ " client");
-diff --git a/drivers/staging/lttng/ltt-ring-buffer-metadata-mmap-client.c b/drivers/staging/lttng/ltt-ring-buffer-metadata-mmap-client.c
-new file mode 100644
-index 0000000..5cad3f9
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-ring-buffer-metadata-mmap-client.c
-@@ -0,0 +1,21 @@
-+/*
-+ * ltt-ring-buffer-metadata-mmap-client.c
-+ *
-+ * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng lib ring buffer metadata client.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include "ltt-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata-mmap"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
-+#include "ltt-ring-buffer-metadata-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");
---
-1.7.9
-
diff --git a/patches.lttng/0010-lttng-tracer-control-and-core-structures.patch b/patches.lttng/0010-lttng-tracer-control-and-core-structures.patch
deleted file mode 100644
index 7c3541e84d0..00000000000
--- a/patches.lttng/0010-lttng-tracer-control-and-core-structures.patch
+++ /dev/null
@@ -1,1812 +0,0 @@
-From ccc7340a9415839763e872bceb626638c58174a1 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:18 -0500
-Subject: lttng: tracer control and core structures
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/ltt-endian.h | 31 +
- drivers/staging/lttng/ltt-events.c | 1009 +++++++++++++++++++++++++++++++
- drivers/staging/lttng/ltt-events.h | 452 ++++++++++++++
- drivers/staging/lttng/ltt-probes.c | 164 +++++
- drivers/staging/lttng/ltt-tracer-core.h | 28 +
- drivers/staging/lttng/ltt-tracer.h | 67 ++
- 6 files changed, 1751 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/ltt-endian.h
- create mode 100644 drivers/staging/lttng/ltt-events.c
- create mode 100644 drivers/staging/lttng/ltt-events.h
- create mode 100644 drivers/staging/lttng/ltt-probes.c
- create mode 100644 drivers/staging/lttng/ltt-tracer-core.h
- create mode 100644 drivers/staging/lttng/ltt-tracer.h
-
-diff --git a/drivers/staging/lttng/ltt-endian.h b/drivers/staging/lttng/ltt-endian.h
-new file mode 100644
-index 0000000..9a0512d
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-endian.h
-@@ -0,0 +1,31 @@
-+#ifndef _LTT_ENDIAN_H
-+#define _LTT_ENDIAN_H
-+
-+/*
-+ * ltt-endian.h
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#ifdef __KERNEL__
-+# include <asm/byteorder.h>
-+# ifdef __BIG_ENDIAN
-+# define __BYTE_ORDER __BIG_ENDIAN
-+# elif defined(__LITTLE_ENDIAN)
-+# define __BYTE_ORDER __LITTLE_ENDIAN
-+# else
-+# error "unknown endianness"
-+# endif
-+#ifndef __BIG_ENDIAN
-+# define __BIG_ENDIAN 4321
-+#endif
-+#ifndef __LITTLE_ENDIAN
-+# define __LITTLE_ENDIAN 1234
-+#endif
-+#else
-+# include <endian.h>
-+#endif
-+
-+#endif /* _LTT_ENDIAN_H */
-diff --git a/drivers/staging/lttng/ltt-events.c b/drivers/staging/lttng/ltt-events.c
-new file mode 100644
-index 0000000..4229914
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-events.c
-@@ -0,0 +1,1009 @@
-+/*
-+ * ltt-events.c
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Holds LTTng per-session event registry.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/list.h>
-+#include <linux/mutex.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/jiffies.h>
-+#include "wrapper/uuid.h"
-+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "ltt-events.h"
-+#include "ltt-tracer.h"
-+
-+static LIST_HEAD(sessions);
-+static LIST_HEAD(ltt_transport_list);
-+static DEFINE_MUTEX(sessions_mutex);
-+static struct kmem_cache *event_cache;
-+
-+static void _ltt_event_destroy(struct ltt_event *event);
-+static void _ltt_channel_destroy(struct ltt_channel *chan);
-+static int _ltt_event_unregister(struct ltt_event *event);
-+static
-+int _ltt_event_metadata_statedump(struct ltt_session *session,
-+ struct ltt_channel *chan,
-+ struct ltt_event *event);
-+static
-+int _ltt_session_metadata_statedump(struct ltt_session *session);
-+
-+void synchronize_trace(void)
-+{
-+ synchronize_sched();
-+#ifdef CONFIG_PREEMPT_RT
-+ synchronize_rcu();
-+#endif
-+}
-+
-+struct ltt_session *ltt_session_create(void)
-+{
-+ struct ltt_session *session;
-+
-+ mutex_lock(&sessions_mutex);
-+ session = kzalloc(sizeof(struct ltt_session), GFP_KERNEL);
-+ if (!session)
-+ return NULL;
-+ INIT_LIST_HEAD(&session->chan);
-+ INIT_LIST_HEAD(&session->events);
-+ uuid_le_gen(&session->uuid);
-+ list_add(&session->list, &sessions);
-+ mutex_unlock(&sessions_mutex);
-+ return session;
-+}
-+
-+void ltt_session_destroy(struct ltt_session *session)
-+{
-+ struct ltt_channel *chan, *tmpchan;
-+ struct ltt_event *event, *tmpevent;
-+ int ret;
-+
-+ mutex_lock(&sessions_mutex);
-+ ACCESS_ONCE(session->active) = 0;
-+ list_for_each_entry(chan, &session->chan, list) {
-+ ret = lttng_syscalls_unregister(chan);
-+ WARN_ON(ret);
-+ }
-+ list_for_each_entry(event, &session->events, list) {
-+ ret = _ltt_event_unregister(event);
-+ WARN_ON(ret);
-+ }
-+ synchronize_trace(); /* Wait for in-flight events to complete */
-+ list_for_each_entry_safe(event, tmpevent, &session->events, list)
-+ _ltt_event_destroy(event);
-+ list_for_each_entry_safe(chan, tmpchan, &session->chan, list)
-+ _ltt_channel_destroy(chan);
-+ list_del(&session->list);
-+ mutex_unlock(&sessions_mutex);
-+ kfree(session);
-+}
-+
-+int ltt_session_enable(struct ltt_session *session)
-+{
-+ int ret = 0;
-+ struct ltt_channel *chan;
-+
-+ mutex_lock(&sessions_mutex);
-+ if (session->active) {
-+ ret = -EBUSY;
-+ goto end;
-+ }
-+
-+ /*
-+ * Snapshot the number of events per channel to know the type of header
-+ * we need to use.
-+ */
-+ list_for_each_entry(chan, &session->chan, list) {
-+ if (chan->header_type)
-+ continue; /* don't change it if session stop/restart */
-+ if (chan->free_event_id < 31)
-+ chan->header_type = 1; /* compact */
-+ else
-+ chan->header_type = 2; /* large */
-+ }
-+
-+ ACCESS_ONCE(session->active) = 1;
-+ ACCESS_ONCE(session->been_active) = 1;
-+ ret = _ltt_session_metadata_statedump(session);
-+ if (ret)
-+ ACCESS_ONCE(session->active) = 0;
-+end:
-+ mutex_unlock(&sessions_mutex);
-+ return ret;
-+}
-+
-+int ltt_session_disable(struct ltt_session *session)
-+{
-+ int ret = 0;
-+
-+ mutex_lock(&sessions_mutex);
-+ if (!session->active) {
-+ ret = -EBUSY;
-+ goto end;
-+ }
-+ ACCESS_ONCE(session->active) = 0;
-+end:
-+ mutex_unlock(&sessions_mutex);
-+ return ret;
-+}
-+
-+int ltt_channel_enable(struct ltt_channel *channel)
-+{
-+ int old;
-+
-+ if (channel == channel->session->metadata)
-+ return -EPERM;
-+ old = xchg(&channel->enabled, 1);
-+ if (old)
-+ return -EEXIST;
-+ return 0;
-+}
-+
-+int ltt_channel_disable(struct ltt_channel *channel)
-+{
-+ int old;
-+
-+ if (channel == channel->session->metadata)
-+ return -EPERM;
-+ old = xchg(&channel->enabled, 0);
-+ if (!old)
-+ return -EEXIST;
-+ return 0;
-+}
-+
-+int ltt_event_enable(struct ltt_event *event)
-+{
-+ int old;
-+
-+ if (event->chan == event->chan->session->metadata)
-+ return -EPERM;
-+ old = xchg(&event->enabled, 1);
-+ if (old)
-+ return -EEXIST;
-+ return 0;
-+}
-+
-+int ltt_event_disable(struct ltt_event *event)
-+{
-+ int old;
-+
-+ if (event->chan == event->chan->session->metadata)
-+ return -EPERM;
-+ old = xchg(&event->enabled, 0);
-+ if (!old)
-+ return -EEXIST;
-+ return 0;
-+}
-+
-+static struct ltt_transport *ltt_transport_find(const char *name)
-+{
-+ struct ltt_transport *transport;
-+
-+ list_for_each_entry(transport, &ltt_transport_list, node) {
-+ if (!strcmp(transport->name, name))
-+ return transport;
-+ }
-+ return NULL;
-+}
-+
-+struct ltt_channel *ltt_channel_create(struct ltt_session *session,
-+ const char *transport_name,
-+ void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval)
-+{
-+ struct ltt_channel *chan;
-+ struct ltt_transport *transport = NULL;
-+
-+ mutex_lock(&sessions_mutex);
-+ if (session->been_active)
-+ goto active; /* Refuse to add channel to active session */
-+ transport = ltt_transport_find(transport_name);
-+ if (!transport) {
-+ printk(KERN_WARNING "LTTng transport %s not found\n",
-+ transport_name);
-+ goto notransport;
-+ }
-+ if (!try_module_get(transport->owner)) {
-+ printk(KERN_WARNING "LTT : Can't lock transport module.\n");
-+ goto notransport;
-+ }
-+ chan = kzalloc(sizeof(struct ltt_channel), GFP_KERNEL);
-+ if (!chan)
-+ goto nomem;
-+ chan->session = session;
-+ chan->id = session->free_chan_id++;
-+ /*
-+ * Note: the channel creation op already writes into the packet
-+ * headers. Therefore the "chan" information used as input
-+ * should be already accessible.
-+ */
-+ chan->chan = transport->ops.channel_create("[lttng]", chan, buf_addr,
-+ subbuf_size, num_subbuf, switch_timer_interval,
-+ read_timer_interval);
-+ if (!chan->chan)
-+ goto create_error;
-+ chan->enabled = 1;
-+ chan->ops = &transport->ops;
-+ chan->transport = transport;
-+ list_add(&chan->list, &session->chan);
-+ mutex_unlock(&sessions_mutex);
-+ return chan;
-+
-+create_error:
-+ kfree(chan);
-+nomem:
-+ if (transport)
-+ module_put(transport->owner);
-+notransport:
-+active:
-+ mutex_unlock(&sessions_mutex);
-+ return NULL;
-+}
-+
-+/*
-+ * Only used internally at session destruction.
-+ */
-+static
-+void _ltt_channel_destroy(struct ltt_channel *chan)
-+{
-+ chan->ops->channel_destroy(chan->chan);
-+ module_put(chan->transport->owner);
-+ list_del(&chan->list);
-+ lttng_destroy_context(chan->ctx);
-+ kfree(chan);
-+}
-+
-+/*
-+ * Supports event creation while tracing session is active.
-+ */
-+struct ltt_event *ltt_event_create(struct ltt_channel *chan,
-+ struct lttng_kernel_event *event_param,
-+ void *filter,
-+ const struct lttng_event_desc *internal_desc)
-+{
-+ struct ltt_event *event;
-+ int ret;
-+
-+ mutex_lock(&sessions_mutex);
-+ if (chan->free_event_id == -1UL)
-+ goto full;
-+ /*
-+ * This is O(n^2) (for each event, the loop is called at event
-+ * creation). Might require a hash if we have lots of events.
-+ */
-+ list_for_each_entry(event, &chan->session->events, list)
-+ if (!strcmp(event->desc->name, event_param->name))
-+ goto exist;
-+ event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
-+ if (!event)
-+ goto cache_error;
-+ event->chan = chan;
-+ event->filter = filter;
-+ event->id = chan->free_event_id++;
-+ event->enabled = 1;
-+ event->instrumentation = event_param->instrumentation;
-+ /* Populate ltt_event structure before tracepoint registration. */
-+ smp_wmb();
-+ switch (event_param->instrumentation) {
-+ case LTTNG_KERNEL_TRACEPOINT:
-+ event->desc = ltt_event_get(event_param->name);
-+ if (!event->desc)
-+ goto register_error;
-+ ret = tracepoint_probe_register(event_param->name,
-+ event->desc->probe_callback,
-+ event);
-+ if (ret)
-+ goto register_error;
-+ break;
-+ case LTTNG_KERNEL_KPROBE:
-+ ret = lttng_kprobes_register(event_param->name,
-+ event_param->u.kprobe.symbol_name,
-+ event_param->u.kprobe.offset,
-+ event_param->u.kprobe.addr,
-+ event);
-+ if (ret)
-+ goto register_error;
-+ ret = try_module_get(event->desc->owner);
-+ WARN_ON_ONCE(!ret);
-+ break;
-+ case LTTNG_KERNEL_KRETPROBE:
-+ {
-+ struct ltt_event *event_return;
-+
-+ /* kretprobe defines 2 events */
-+ event_return =
-+ kmem_cache_zalloc(event_cache, GFP_KERNEL);
-+ if (!event_return)
-+ goto register_error;
-+ event_return->chan = chan;
-+ event_return->filter = filter;
-+ event_return->id = chan->free_event_id++;
-+ event_return->enabled = 1;
-+ event_return->instrumentation = event_param->instrumentation;
-+ /*
-+ * Populate ltt_event structure before kretprobe registration.
-+ */
-+ smp_wmb();
-+ ret = lttng_kretprobes_register(event_param->name,
-+ event_param->u.kretprobe.symbol_name,
-+ event_param->u.kretprobe.offset,
-+ event_param->u.kretprobe.addr,
-+ event, event_return);
-+ if (ret) {
-+ kmem_cache_free(event_cache, event_return);
-+ goto register_error;
-+ }
-+ /* Take 2 refs on the module: one per event. */
-+ ret = try_module_get(event->desc->owner);
-+ WARN_ON_ONCE(!ret);
-+ ret = try_module_get(event->desc->owner);
-+ WARN_ON_ONCE(!ret);
-+ ret = _ltt_event_metadata_statedump(chan->session, chan,
-+ event_return);
-+ if (ret) {
-+ kmem_cache_free(event_cache, event_return);
-+ module_put(event->desc->owner);
-+ module_put(event->desc->owner);
-+ goto statedump_error;
-+ }
-+ list_add(&event_return->list, &chan->session->events);
-+ break;
-+ }
-+ case LTTNG_KERNEL_FUNCTION:
-+ ret = lttng_ftrace_register(event_param->name,
-+ event_param->u.ftrace.symbol_name,
-+ event);
-+ if (ret)
-+ goto register_error;
-+ ret = try_module_get(event->desc->owner);
-+ WARN_ON_ONCE(!ret);
-+ break;
-+ case LTTNG_KERNEL_NOOP:
-+ event->desc = internal_desc;
-+ if (!event->desc)
-+ goto register_error;
-+ break;
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+ ret = _ltt_event_metadata_statedump(chan->session, chan, event);
-+ if (ret)
-+ goto statedump_error;
-+ list_add(&event->list, &chan->session->events);
-+ mutex_unlock(&sessions_mutex);
-+ return event;
-+
-+statedump_error:
-+ /* If a statedump error occurs, events will not be readable. */
-+register_error:
-+ kmem_cache_free(event_cache, event);
-+cache_error:
-+exist:
-+full:
-+ mutex_unlock(&sessions_mutex);
-+ return NULL;
-+}
-+
-+/*
-+ * Only used internally at session destruction.
-+ */
-+int _ltt_event_unregister(struct ltt_event *event)
-+{
-+ int ret = -EINVAL;
-+
-+ switch (event->instrumentation) {
-+ case LTTNG_KERNEL_TRACEPOINT:
-+ ret = tracepoint_probe_unregister(event->desc->name,
-+ event->desc->probe_callback,
-+ event);
-+ if (ret)
-+ return ret;
-+ break;
-+ case LTTNG_KERNEL_KPROBE:
-+ lttng_kprobes_unregister(event);
-+ ret = 0;
-+ break;
-+ case LTTNG_KERNEL_KRETPROBE:
-+ lttng_kretprobes_unregister(event);
-+ ret = 0;
-+ break;
-+ case LTTNG_KERNEL_FUNCTION:
-+ lttng_ftrace_unregister(event);
-+ ret = 0;
-+ break;
-+ case LTTNG_KERNEL_NOOP:
-+ ret = 0;
-+ break;
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+ return ret;
-+}
-+
-+/*
-+ * Only used internally at session destruction.
-+ */
-+static
-+void _ltt_event_destroy(struct ltt_event *event)
-+{
-+ switch (event->instrumentation) {
-+ case LTTNG_KERNEL_TRACEPOINT:
-+ ltt_event_put(event->desc);
-+ break;
-+ case LTTNG_KERNEL_KPROBE:
-+ module_put(event->desc->owner);
-+ lttng_kprobes_destroy_private(event);
-+ break;
-+ case LTTNG_KERNEL_KRETPROBE:
-+ module_put(event->desc->owner);
-+ lttng_kretprobes_destroy_private(event);
-+ break;
-+ case LTTNG_KERNEL_FUNCTION:
-+ module_put(event->desc->owner);
-+ lttng_ftrace_destroy_private(event);
-+ break;
-+ case LTTNG_KERNEL_NOOP:
-+ break;
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+ list_del(&event->list);
-+ lttng_destroy_context(event->ctx);
-+ kmem_cache_free(event_cache, event);
-+}
-+
-+/*
-+ * We have exclusive access to our metadata buffer (protected by the
-+ * sessions_mutex), so we can do racy operations such as looking for
-+ * remaining space left in packet and write, since mutual exclusion
-+ * protects us from concurrent writes.
-+ */
-+int lttng_metadata_printf(struct ltt_session *session,
-+ const char *fmt, ...)
-+{
-+ struct lib_ring_buffer_ctx ctx;
-+ struct ltt_channel *chan = session->metadata;
-+ char *str;
-+ int ret = 0, waitret;
-+ size_t len, reserve_len, pos;
-+ va_list ap;
-+
-+ WARN_ON_ONCE(!ACCESS_ONCE(session->active));
-+
-+ va_start(ap, fmt);
-+ str = kvasprintf(GFP_KERNEL, fmt, ap);
-+ va_end(ap);
-+ if (!str)
-+ return -ENOMEM;
-+
-+ len = strlen(str);
-+ pos = 0;
-+
-+ for (pos = 0; pos < len; pos += reserve_len) {
-+ reserve_len = min_t(size_t,
-+ chan->ops->packet_avail_size(chan->chan),
-+ len - pos);
-+ lib_ring_buffer_ctx_init(&ctx, chan->chan, NULL, reserve_len,
-+ sizeof(char), -1);
-+ /*
-+ * We don't care about metadata buffer's records lost
-+ * count, because we always retry here. Report error if
-+ * we need to bail out after timeout or being
-+ * interrupted.
-+ */
-+ waitret = wait_event_interruptible_timeout(*chan->ops->get_writer_buf_wait_queue(chan->chan, -1),
-+ ({
-+ ret = chan->ops->event_reserve(&ctx, 0);
-+ ret != -ENOBUFS || !ret;
-+ }),
-+ msecs_to_jiffies(LTTNG_METADATA_TIMEOUT_MSEC));
-+ if (!waitret || waitret == -ERESTARTSYS || ret) {
-+ printk(KERN_WARNING "LTTng: Failure to write metadata to buffers (%s)\n",
-+ waitret == -ERESTARTSYS ? "interrupted" :
-+ (ret == -ENOBUFS ? "timeout" : "I/O error"));
-+ if (waitret == -ERESTARTSYS)
-+ ret = waitret;
-+ goto end;
-+ }
-+ chan->ops->event_write(&ctx, &str[pos], reserve_len);
-+ chan->ops->event_commit(&ctx);
-+ }
-+end:
-+ kfree(str);
-+ return ret;
-+}
-+
-+static
-+int _ltt_field_statedump(struct ltt_session *session,
-+ const struct lttng_event_field *field)
-+{
-+ int ret = 0;
-+
-+ switch (field->type.atype) {
-+ case atype_integer:
-+ ret = lttng_metadata_printf(session,
-+ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
-+ field->type.u.basic.integer.size,
-+ field->type.u.basic.integer.alignment,
-+ field->type.u.basic.integer.signedness,
-+ (field->type.u.basic.integer.encoding == lttng_encode_none)
-+ ? "none"
-+ : (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
-+ ? "UTF8"
-+ : "ASCII",
-+ field->type.u.basic.integer.base,
-+#ifdef __BIG_ENDIAN
-+ field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
-+#else
-+ field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
-+#endif
-+ field->name);
-+ break;
-+ case atype_enum:
-+ ret = lttng_metadata_printf(session,
-+ " %s _%s;\n",
-+ field->type.u.basic.enumeration.name,
-+ field->name);
-+ break;
-+ case atype_array:
-+ {
-+ const struct lttng_basic_type *elem_type;
-+
-+ elem_type = &field->type.u.array.elem_type;
-+ ret = lttng_metadata_printf(session,
-+ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
-+ elem_type->u.basic.integer.size,
-+ elem_type->u.basic.integer.alignment,
-+ elem_type->u.basic.integer.signedness,
-+ (elem_type->u.basic.integer.encoding == lttng_encode_none)
-+ ? "none"
-+ : (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
-+ ? "UTF8"
-+ : "ASCII",
-+ elem_type->u.basic.integer.base,
-+#ifdef __BIG_ENDIAN
-+ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
-+#else
-+ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
-+#endif
-+ field->name, field->type.u.array.length);
-+ break;
-+ }
-+ case atype_sequence:
-+ {
-+ const struct lttng_basic_type *elem_type;
-+ const struct lttng_basic_type *length_type;
-+
-+ elem_type = &field->type.u.sequence.elem_type;
-+ length_type = &field->type.u.sequence.length_type;
-+ ret = lttng_metadata_printf(session,
-+ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
-+ length_type->u.basic.integer.size,
-+ (unsigned int) length_type->u.basic.integer.alignment,
-+ length_type->u.basic.integer.signedness,
-+ (length_type->u.basic.integer.encoding == lttng_encode_none)
-+ ? "none"
-+ : ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
-+ ? "UTF8"
-+ : "ASCII"),
-+ length_type->u.basic.integer.base,
-+#ifdef __BIG_ENDIAN
-+ length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
-+#else
-+ length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
-+#endif
-+ field->name);
-+ if (ret)
-+ return ret;
-+
-+ ret = lttng_metadata_printf(session,
-+ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
-+ elem_type->u.basic.integer.size,
-+ (unsigned int) elem_type->u.basic.integer.alignment,
-+ elem_type->u.basic.integer.signedness,
-+ (elem_type->u.basic.integer.encoding == lttng_encode_none)
-+ ? "none"
-+ : ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
-+ ? "UTF8"
-+ : "ASCII"),
-+ elem_type->u.basic.integer.base,
-+#ifdef __BIG_ENDIAN
-+ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
-+#else
-+ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
-+#endif
-+ field->name,
-+ field->name);
-+ break;
-+ }
-+
-+ case atype_string:
-+ /* Default encoding is UTF8 */
-+ ret = lttng_metadata_printf(session,
-+ " string%s _%s;\n",
-+ field->type.u.basic.string.encoding == lttng_encode_ASCII ?
-+ " { encoding = ASCII; }" : "",
-+ field->name);
-+ break;
-+ default:
-+ WARN_ON_ONCE(1);
-+ return -EINVAL;
-+ }
-+ return ret;
-+}
-+
-+static
-+int _ltt_context_metadata_statedump(struct ltt_session *session,
-+ struct lttng_ctx *ctx)
-+{
-+ int ret = 0;
-+ int i;
-+
-+ if (!ctx)
-+ return 0;
-+ for (i = 0; i < ctx->nr_fields; i++) {
-+ const struct lttng_ctx_field *field = &ctx->fields[i];
-+
-+ ret = _ltt_field_statedump(session, &field->event_field);
-+ if (ret)
-+ return ret;
-+ }
-+ return ret;
-+}
-+
-+static
-+int _ltt_fields_metadata_statedump(struct ltt_session *session,
-+ struct ltt_event *event)
-+{
-+ const struct lttng_event_desc *desc = event->desc;
-+ int ret = 0;
-+ int i;
-+
-+ for (i = 0; i < desc->nr_fields; i++) {
-+ const struct lttng_event_field *field = &desc->fields[i];
-+
-+ ret = _ltt_field_statedump(session, field);
-+ if (ret)
-+ return ret;
-+ }
-+ return ret;
-+}
-+
-+static
-+int _ltt_event_metadata_statedump(struct ltt_session *session,
-+ struct ltt_channel *chan,
-+ struct ltt_event *event)
-+{
-+ int ret = 0;
-+
-+ if (event->metadata_dumped || !ACCESS_ONCE(session->active))
-+ return 0;
-+ if (chan == session->metadata)
-+ return 0;
-+
-+ ret = lttng_metadata_printf(session,
-+ "event {\n"
-+ " name = %s;\n"
-+ " id = %u;\n"
-+ " stream_id = %u;\n",
-+ event->desc->name,
-+ event->id,
-+ event->chan->id);
-+ if (ret)
-+ goto end;
-+
-+ if (event->ctx) {
-+ ret = lttng_metadata_printf(session,
-+ " context := struct {\n");
-+ if (ret)
-+ goto end;
-+ }
-+ ret = _ltt_context_metadata_statedump(session, event->ctx);
-+ if (ret)
-+ goto end;
-+ if (event->ctx) {
-+ ret = lttng_metadata_printf(session,
-+ " };\n");
-+ if (ret)
-+ goto end;
-+ }
-+
-+ ret = lttng_metadata_printf(session,
-+ " fields := struct {\n"
-+ );
-+ if (ret)
-+ goto end;
-+
-+ ret = _ltt_fields_metadata_statedump(session, event);
-+ if (ret)
-+ goto end;
-+
-+ /*
-+ * LTTng space reservation can only reserve multiples of the
-+ * byte size.
-+ */
-+ ret = lttng_metadata_printf(session,
-+ " };\n"
-+ "};\n\n");
-+ if (ret)
-+ goto end;
-+
-+ event->metadata_dumped = 1;
-+end:
-+ return ret;
-+
-+}
-+
-+static
-+int _ltt_channel_metadata_statedump(struct ltt_session *session,
-+ struct ltt_channel *chan)
-+{
-+ int ret = 0;
-+
-+ if (chan->metadata_dumped || !ACCESS_ONCE(session->active))
-+ return 0;
-+ if (chan == session->metadata)
-+ return 0;
-+
-+ WARN_ON_ONCE(!chan->header_type);
-+ ret = lttng_metadata_printf(session,
-+ "stream {\n"
-+ " id = %u;\n"
-+ " event.header := %s;\n"
-+ " packet.context := struct packet_context;\n",
-+ chan->id,
-+ chan->header_type == 1 ? "struct event_header_compact" :
-+ "struct event_header_large");
-+ if (ret)
-+ goto end;
-+
-+ if (chan->ctx) {
-+ ret = lttng_metadata_printf(session,
-+ " event.context := struct {\n");
-+ if (ret)
-+ goto end;
-+ }
-+ ret = _ltt_context_metadata_statedump(session, chan->ctx);
-+ if (ret)
-+ goto end;
-+ if (chan->ctx) {
-+ ret = lttng_metadata_printf(session,
-+ " };\n");
-+ if (ret)
-+ goto end;
-+ }
-+
-+ ret = lttng_metadata_printf(session,
-+ "};\n\n");
-+
-+ chan->metadata_dumped = 1;
-+end:
-+ return ret;
-+}
-+
-+static
-+int _ltt_stream_packet_context_declare(struct ltt_session *session)
-+{
-+ return lttng_metadata_printf(session,
-+ "struct packet_context {\n"
-+ " uint64_t timestamp_begin;\n"
-+ " uint64_t timestamp_end;\n"
-+ " uint32_t events_discarded;\n"
-+ " uint32_t content_size;\n"
-+ " uint32_t packet_size;\n"
-+ " uint32_t cpu_id;\n"
-+ "};\n\n"
-+ );
-+}
-+
-+/*
-+ * Compact header:
-+ * id: range: 0 - 30.
-+ * id 31 is reserved to indicate an extended header.
-+ *
-+ * Large header:
-+ * id: range: 0 - 65534.
-+ * id 65535 is reserved to indicate an extended header.
-+ */
-+static
-+int _ltt_event_header_declare(struct ltt_session *session)
-+{
-+ return lttng_metadata_printf(session,
-+ "struct event_header_compact {\n"
-+ " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
-+ " variant <id> {\n"
-+ " struct {\n"
-+ " uint27_t timestamp;\n"
-+ " } compact;\n"
-+ " struct {\n"
-+ " uint32_t id;\n"
-+ " uint64_t timestamp;\n"
-+ " } extended;\n"
-+ " } v;\n"
-+ "} align(%u);\n"
-+ "\n"
-+ "struct event_header_large {\n"
-+ " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
-+ " variant <id> {\n"
-+ " struct {\n"
-+ " uint32_t timestamp;\n"
-+ " } compact;\n"
-+ " struct {\n"
-+ " uint32_t id;\n"
-+ " uint64_t timestamp;\n"
-+ " } extended;\n"
-+ " } v;\n"
-+ "} align(%u);\n\n",
-+ ltt_alignof(uint32_t) * CHAR_BIT,
-+ ltt_alignof(uint16_t) * CHAR_BIT
-+ );
-+}
-+
-+/*
-+ * Output metadata into this session's metadata buffers.
-+ */
-+static
-+int _ltt_session_metadata_statedump(struct ltt_session *session)
-+{
-+ unsigned char *uuid_c = session->uuid.b;
-+ unsigned char uuid_s[37];
-+ struct ltt_channel *chan;
-+ struct ltt_event *event;
-+ int ret = 0;
-+
-+ if (!ACCESS_ONCE(session->active))
-+ return 0;
-+ if (session->metadata_dumped)
-+ goto skip_session;
-+ if (!session->metadata) {
-+		printk(KERN_WARNING "LTTng: attempt to start tracing, but the metadata channel was not found. Aborting operation.\n");
-+ return -EPERM;
-+ }
-+
-+ snprintf(uuid_s, sizeof(uuid_s),
-+ "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
-+ uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
-+ uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
-+ uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
-+ uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
-+
-+ ret = lttng_metadata_printf(session,
-+ "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
-+ "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
-+ "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
-+ "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
-+ "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
-+ "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
-+ "\n"
-+ "trace {\n"
-+ " major = %u;\n"
-+ " minor = %u;\n"
-+ " uuid = \"%s\";\n"
-+ " byte_order = %s;\n"
-+ " packet.header := struct {\n"
-+ " uint32_t magic;\n"
-+ " uint8_t uuid[16];\n"
-+ " uint32_t stream_id;\n"
-+ " };\n"
-+ "};\n\n",
-+ ltt_alignof(uint8_t) * CHAR_BIT,
-+ ltt_alignof(uint16_t) * CHAR_BIT,
-+ ltt_alignof(uint32_t) * CHAR_BIT,
-+ ltt_alignof(uint64_t) * CHAR_BIT,
-+ CTF_VERSION_MAJOR,
-+ CTF_VERSION_MINOR,
-+ uuid_s,
-+#ifdef __BIG_ENDIAN
-+ "be"
-+#else
-+ "le"
-+#endif
-+ );
-+ if (ret)
-+ goto end;
-+
-+ ret = _ltt_stream_packet_context_declare(session);
-+ if (ret)
-+ goto end;
-+
-+ ret = _ltt_event_header_declare(session);
-+ if (ret)
-+ goto end;
-+
-+skip_session:
-+ list_for_each_entry(chan, &session->chan, list) {
-+ ret = _ltt_channel_metadata_statedump(session, chan);
-+ if (ret)
-+ goto end;
-+ }
-+
-+ list_for_each_entry(event, &session->events, list) {
-+ ret = _ltt_event_metadata_statedump(session, event->chan, event);
-+ if (ret)
-+ goto end;
-+ }
-+ session->metadata_dumped = 1;
-+end:
-+ return ret;
-+}
-+
-+/**
-+ * ltt_transport_register - LTT transport registration
-+ * @transport: transport structure
-+ *
-+ * Registers a transport which can be used as output to extract the data out of
-+ * LTTng. The module calling this registration function must ensure that no
-+ * trap-inducing code will be executed by the transport functions. E.g.
-+ * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
-+ * is made visible to the transport function. This registration acts as a
-+ * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
-+ * after its registration must it synchronize the TLBs.
-+ */
-+void ltt_transport_register(struct ltt_transport *transport)
-+{
-+ /*
-+ * Make sure no page fault can be triggered by the module about to be
-+ * registered. We deal with this here so we don't have to call
-+ * vmalloc_sync_all() in each module's init.
-+ */
-+ wrapper_vmalloc_sync_all();
-+
-+ mutex_lock(&sessions_mutex);
-+ list_add_tail(&transport->node, &ltt_transport_list);
-+ mutex_unlock(&sessions_mutex);
-+}
-+EXPORT_SYMBOL_GPL(ltt_transport_register);
-+
-+/**
-+ * ltt_transport_unregister - LTT transport unregistration
-+ * @transport: transport structure
-+ */
-+void ltt_transport_unregister(struct ltt_transport *transport)
-+{
-+ mutex_lock(&sessions_mutex);
-+ list_del(&transport->node);
-+ mutex_unlock(&sessions_mutex);
-+}
-+EXPORT_SYMBOL_GPL(ltt_transport_unregister);
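Illustrative sketch only, not part of the removed patch: it shows how a hypothetical ring-buffer client module might plug into the transport registry documented above. All names beginning with "my_" are placeholders; struct ltt_transport and struct ltt_channel_ops are taken from ltt-events.h later in this patch, and a real client must implement every ltt_channel_ops callback, not just the two shown.

#include <linux/module.h>
#include "ltt-events.h"

/* Placeholder callbacks -- a real client implements every ltt_channel_ops member. */
static struct channel *my_channel_create(const char *name,
		struct ltt_channel *ltt_chan, void *buf_addr,
		size_t subbuf_size, size_t num_subbuf,
		unsigned int switch_timer_interval,
		unsigned int read_timer_interval);
static void my_channel_destroy(struct channel *chan);

static struct ltt_transport my_transport = {
	.name = "relay-example",	/* looked up by name in ltt_channel_create() */
	.owner = THIS_MODULE,		/* pinned with try_module_get() while channels exist */
	.ops = {
		.channel_create = my_channel_create,
		.channel_destroy = my_channel_destroy,
		/* ... remaining ltt_channel_ops callbacks go here ... */
	},
};

static int __init my_transport_init(void)
{
	/* Registration also acts as a wrapper_vmalloc_sync_all(), as documented above. */
	ltt_transport_register(&my_transport);
	return 0;
}
module_init(my_transport_init);

static void __exit my_transport_exit(void)
{
	ltt_transport_unregister(&my_transport);
}
module_exit(my_transport_exit);

MODULE_LICENSE("GPL");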
-+
-+static int __init ltt_events_init(void)
-+{
-+ int ret;
-+
-+ event_cache = KMEM_CACHE(ltt_event, 0);
-+ if (!event_cache)
-+ return -ENOMEM;
-+ ret = ltt_debugfs_abi_init();
-+ if (ret)
-+ goto error_abi;
-+ return 0;
-+error_abi:
-+ kmem_cache_destroy(event_cache);
-+ return ret;
-+}
-+
-+module_init(ltt_events_init);
-+
-+static void __exit ltt_events_exit(void)
-+{
-+ struct ltt_session *session, *tmpsession;
-+
-+ ltt_debugfs_abi_exit();
-+ list_for_each_entry_safe(session, tmpsession, &sessions, list)
-+ ltt_session_destroy(session);
-+ kmem_cache_destroy(event_cache);
-+}
-+
-+module_exit(ltt_events_exit);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng Events");
-diff --git a/drivers/staging/lttng/ltt-events.h b/drivers/staging/lttng/ltt-events.h
-new file mode 100644
-index 0000000..36b281a
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-events.h
-@@ -0,0 +1,452 @@
-+#ifndef _LTT_EVENTS_H
-+#define _LTT_EVENTS_H
-+
-+/*
-+ * ltt-events.h
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Holds LTTng per-session event registry.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/list.h>
-+#include <linux/kprobes.h>
-+#include "wrapper/uuid.h"
-+#include "ltt-debugfs-abi.h"
-+
-+#undef is_signed_type
-+#define is_signed_type(type) (((type)(-1)) < 0)
-+
-+struct ltt_channel;
-+struct ltt_session;
-+struct lib_ring_buffer_ctx;
-+struct perf_event;
-+struct perf_event_attr;
-+
-+/* Type description */
-+
-+/* Update the abstract_types name table in lttng-types.c along with this enum */
-+enum abstract_types {
-+ atype_integer,
-+ atype_enum,
-+ atype_array,
-+ atype_sequence,
-+ atype_string,
-+ NR_ABSTRACT_TYPES,
-+};
-+
-+/* Update the string_encodings name table in lttng-types.c along with this enum */
-+enum lttng_string_encodings {
-+ lttng_encode_none = 0,
-+ lttng_encode_UTF8 = 1,
-+ lttng_encode_ASCII = 2,
-+ NR_STRING_ENCODINGS,
-+};
-+
-+struct lttng_enum_entry {
-+ unsigned long long start, end; /* start and end are inclusive */
-+ const char *string;
-+};
-+
-+#define __type_integer(_type, _byte_order, _base, _encoding) \
-+ { \
-+ .atype = atype_integer, \
-+ .u.basic.integer = \
-+ { \
-+ .size = sizeof(_type) * CHAR_BIT, \
-+ .alignment = ltt_alignof(_type) * CHAR_BIT, \
-+ .signedness = is_signed_type(_type), \
-+ .reverse_byte_order = _byte_order != __BYTE_ORDER, \
-+ .base = _base, \
-+ .encoding = lttng_encode_##_encoding, \
-+ }, \
-+ } \
-+
-+struct lttng_integer_type {
-+ unsigned int size; /* in bits */
-+ unsigned short alignment; /* in bits */
-+ unsigned int signedness:1;
-+ unsigned int reverse_byte_order:1;
-+ unsigned int base; /* 2, 8, 10, 16, for pretty print */
-+ enum lttng_string_encodings encoding;
-+};
-+
-+union _lttng_basic_type {
-+ struct lttng_integer_type integer;
-+ struct {
-+ const char *name;
-+ } enumeration;
-+ struct {
-+ enum lttng_string_encodings encoding;
-+ } string;
-+};
-+
-+struct lttng_basic_type {
-+ enum abstract_types atype;
-+ union {
-+ union _lttng_basic_type basic;
-+ } u;
-+};
-+
-+struct lttng_type {
-+ enum abstract_types atype;
-+ union {
-+ union _lttng_basic_type basic;
-+ struct {
-+ struct lttng_basic_type elem_type;
-+ unsigned int length; /* num. elems. */
-+ } array;
-+ struct {
-+ struct lttng_basic_type length_type;
-+ struct lttng_basic_type elem_type;
-+ } sequence;
-+ } u;
-+};
-+
-+struct lttng_enum {
-+ const char *name;
-+ struct lttng_type container_type;
-+ const struct lttng_enum_entry *entries;
-+ unsigned int len;
-+};
-+
-+/* Event field description */
-+
-+struct lttng_event_field {
-+ const char *name;
-+ struct lttng_type type;
-+};
-+
-+/*
-+ * We need to keep this perf counter field separate from struct
-+ * lttng_ctx_field because cpu hotplug needs fixed-location addresses.
-+ */
-+struct lttng_perf_counter_field {
-+ struct notifier_block nb;
-+ int hp_enable;
-+ struct perf_event_attr *attr;
-+ struct perf_event **e; /* per-cpu array */
-+};
-+
-+struct lttng_ctx_field {
-+ struct lttng_event_field event_field;
-+ size_t (*get_size)(size_t offset);
-+ void (*record)(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan);
-+ union {
-+ struct lttng_perf_counter_field *perf_counter;
-+ } u;
-+ void (*destroy)(struct lttng_ctx_field *field);
-+};
-+
-+struct lttng_ctx {
-+ struct lttng_ctx_field *fields;
-+ unsigned int nr_fields;
-+ unsigned int allocated_fields;
-+};
-+
-+struct lttng_event_desc {
-+ const char *name;
-+ void *probe_callback;
-+ const struct lttng_event_ctx *ctx; /* context */
-+ const struct lttng_event_field *fields; /* event payload */
-+ unsigned int nr_fields;
-+ struct module *owner;
-+};
-+
-+struct lttng_probe_desc {
-+ const struct lttng_event_desc **event_desc;
-+ unsigned int nr_events;
-+ struct list_head head; /* chain registered probes */
-+};
-+
-+struct lttng_krp; /* Kretprobe handling */
-+
-+/*
-+ * ltt_event structure is referred to by the tracing fast path. It must be
-+ * kept small.
-+ */
-+struct ltt_event {
-+ unsigned int id;
-+ struct ltt_channel *chan;
-+ int enabled;
-+ const struct lttng_event_desc *desc;
-+ void *filter;
-+ struct lttng_ctx *ctx;
-+ enum lttng_kernel_instrumentation instrumentation;
-+ union {
-+ struct {
-+ struct kprobe kp;
-+ char *symbol_name;
-+ } kprobe;
-+ struct {
-+ struct lttng_krp *lttng_krp;
-+ char *symbol_name;
-+ } kretprobe;
-+ struct {
-+ char *symbol_name;
-+ } ftrace;
-+ } u;
-+ struct list_head list; /* Event list */
-+ int metadata_dumped:1;
-+};
-+
-+struct ltt_channel_ops {
-+ struct channel *(*channel_create)(const char *name,
-+ struct ltt_channel *ltt_chan,
-+ void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval);
-+ void (*channel_destroy)(struct channel *chan);
-+ struct lib_ring_buffer *(*buffer_read_open)(struct channel *chan);
-+ int (*buffer_has_read_closed_stream)(struct channel *chan);
-+ void (*buffer_read_close)(struct lib_ring_buffer *buf);
-+ int (*event_reserve)(struct lib_ring_buffer_ctx *ctx,
-+ uint32_t event_id);
-+ void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
-+ void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
-+ size_t len);
-+ void (*event_write_from_user)(struct lib_ring_buffer_ctx *ctx,
-+ const void *src, size_t len);
-+ void (*event_memset)(struct lib_ring_buffer_ctx *ctx,
-+ int c, size_t len);
-+ /*
-+ * packet_avail_size returns the available size in the current
-+ * packet. Note that the size returned is only a hint, since it
-+ * may change due to concurrent writes.
-+ */
-+ size_t (*packet_avail_size)(struct channel *chan);
-+ wait_queue_head_t *(*get_writer_buf_wait_queue)(struct channel *chan, int cpu);
-+ wait_queue_head_t *(*get_hp_wait_queue)(struct channel *chan);
-+ int (*is_finalized)(struct channel *chan);
-+ int (*is_disabled)(struct channel *chan);
-+};
-+
-+struct ltt_transport {
-+ char *name;
-+ struct module *owner;
-+ struct list_head node;
-+ struct ltt_channel_ops ops;
-+};
-+
-+struct ltt_channel {
-+ unsigned int id;
-+ struct channel *chan; /* Channel buffers */
-+ int enabled;
-+ struct lttng_ctx *ctx;
-+ /* Event ID management */
-+ struct ltt_session *session;
-+ struct file *file; /* File associated to channel */
-+ unsigned int free_event_id; /* Next event ID to allocate */
-+ struct list_head list; /* Channel list */
-+ struct ltt_channel_ops *ops;
-+ struct ltt_transport *transport;
-+ struct ltt_event **sc_table; /* for syscall tracing */
-+ struct ltt_event **compat_sc_table;
-+ struct ltt_event *sc_unknown; /* for unknown syscalls */
-+ struct ltt_event *sc_compat_unknown;
-+ struct ltt_event *sc_exit; /* for syscall exit */
-+ int header_type; /* 0: unset, 1: compact, 2: large */
-+ int metadata_dumped:1;
-+};
-+
-+struct ltt_session {
-+ int active; /* Is trace session active ? */
-+ int been_active; /* Has trace session been active ? */
-+ struct file *file; /* File associated to session */
-+ struct ltt_channel *metadata; /* Metadata channel */
-+ struct list_head chan; /* Channel list head */
-+ struct list_head events; /* Event list head */
-+ struct list_head list; /* Session list */
-+ unsigned int free_chan_id; /* Next chan ID to allocate */
-+ uuid_le uuid; /* Trace session unique ID */
-+ int metadata_dumped:1;
-+};
-+
-+struct ltt_session *ltt_session_create(void);
-+int ltt_session_enable(struct ltt_session *session);
-+int ltt_session_disable(struct ltt_session *session);
-+void ltt_session_destroy(struct ltt_session *session);
-+
-+struct ltt_channel *ltt_channel_create(struct ltt_session *session,
-+ const char *transport_name,
-+ void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval);
-+struct ltt_channel *ltt_global_channel_create(struct ltt_session *session,
-+ int overwrite, void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval);
-+
-+struct ltt_event *ltt_event_create(struct ltt_channel *chan,
-+ struct lttng_kernel_event *event_param,
-+ void *filter,
-+ const struct lttng_event_desc *internal_desc);
-+
-+int ltt_channel_enable(struct ltt_channel *channel);
-+int ltt_channel_disable(struct ltt_channel *channel);
-+int ltt_event_enable(struct ltt_event *event);
-+int ltt_event_disable(struct ltt_event *event);
-+
-+void ltt_transport_register(struct ltt_transport *transport);
-+void ltt_transport_unregister(struct ltt_transport *transport);
-+
-+void synchronize_trace(void);
-+int ltt_debugfs_abi_init(void);
-+void ltt_debugfs_abi_exit(void);
-+
-+int ltt_probe_register(struct lttng_probe_desc *desc);
-+void ltt_probe_unregister(struct lttng_probe_desc *desc);
-+const struct lttng_event_desc *ltt_event_get(const char *name);
-+void ltt_event_put(const struct lttng_event_desc *desc);
-+int ltt_probes_init(void);
-+void ltt_probes_exit(void);
-+
-+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
-+int lttng_syscalls_register(struct ltt_channel *chan, void *filter);
-+int lttng_syscalls_unregister(struct ltt_channel *chan);
-+#else
-+static inline int lttng_syscalls_register(struct ltt_channel *chan, void *filter)
-+{
-+ return -ENOSYS;
-+}
-+
-+static inline int lttng_syscalls_unregister(struct ltt_channel *chan)
-+{
-+ return 0;
-+}
-+#endif
-+
-+struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx);
-+int lttng_find_context(struct lttng_ctx *ctx, const char *name);
-+void lttng_remove_context_field(struct lttng_ctx **ctx,
-+ struct lttng_ctx_field *field);
-+void lttng_destroy_context(struct lttng_ctx *ctx);
-+int lttng_add_pid_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_procname_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_prio_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_nice_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_tid_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx);
-+#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
-+int lttng_add_perf_counter_to_ctx(uint32_t type,
-+ uint64_t config,
-+ const char *name,
-+ struct lttng_ctx **ctx);
-+#else
-+static inline
-+int lttng_add_perf_counter_to_ctx(uint32_t type,
-+ uint64_t config,
-+ const char *name,
-+ struct lttng_ctx **ctx)
-+{
-+ return -ENOSYS;
-+}
-+#endif
-+
-+#ifdef CONFIG_KPROBES
-+int lttng_kprobes_register(const char *name,
-+ const char *symbol_name,
-+ uint64_t offset,
-+ uint64_t addr,
-+ struct ltt_event *event);
-+void lttng_kprobes_unregister(struct ltt_event *event);
-+void lttng_kprobes_destroy_private(struct ltt_event *event);
-+#else
-+static inline
-+int lttng_kprobes_register(const char *name,
-+ const char *symbol_name,
-+ uint64_t offset,
-+ uint64_t addr,
-+ struct ltt_event *event)
-+{
-+ return -ENOSYS;
-+}
-+
-+static inline
-+void lttng_kprobes_unregister(struct ltt_event *event)
-+{
-+}
-+
-+static inline
-+void lttng_kprobes_destroy_private(struct ltt_event *event)
-+{
-+}
-+#endif
-+
-+#ifdef CONFIG_KRETPROBES
-+int lttng_kretprobes_register(const char *name,
-+ const char *symbol_name,
-+ uint64_t offset,
-+ uint64_t addr,
-+ struct ltt_event *event_entry,
-+ struct ltt_event *event_exit);
-+void lttng_kretprobes_unregister(struct ltt_event *event);
-+void lttng_kretprobes_destroy_private(struct ltt_event *event);
-+#else
-+static inline
-+int lttng_kretprobes_register(const char *name,
-+ const char *symbol_name,
-+ uint64_t offset,
-+ uint64_t addr,
-+ struct ltt_event *event_entry,
-+ struct ltt_event *event_exit)
-+{
-+ return -ENOSYS;
-+}
-+
-+static inline
-+void lttng_kretprobes_unregister(struct ltt_event *event)
-+{
-+}
-+
-+static inline
-+void lttng_kretprobes_destroy_private(struct ltt_event *event)
-+{
-+}
-+#endif
-+
-+#ifdef CONFIG_DYNAMIC_FTRACE
-+int lttng_ftrace_register(const char *name,
-+ const char *symbol_name,
-+ struct ltt_event *event);
-+void lttng_ftrace_unregister(struct ltt_event *event);
-+void lttng_ftrace_destroy_private(struct ltt_event *event);
-+#else
-+static inline
-+int lttng_ftrace_register(const char *name,
-+ const char *symbol_name,
-+ struct ltt_event *event)
-+{
-+ return -ENOSYS;
-+}
-+
-+static inline
-+void lttng_ftrace_unregister(struct ltt_event *event)
-+{
-+}
-+
-+static inline
-+void lttng_ftrace_destroy_private(struct ltt_event *event)
-+{
-+}
-+#endif
-+
-+int lttng_calibrate(struct lttng_kernel_calibrate *calibrate);
-+
-+extern const struct file_operations lttng_tracepoint_list_fops;
-+
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
-+#define TRACEPOINT_HAS_DATA_ARG
-+#endif
-+
-+#endif /* _LTT_EVENTS_H */
-diff --git a/drivers/staging/lttng/ltt-probes.c b/drivers/staging/lttng/ltt-probes.c
-new file mode 100644
-index 0000000..81dcbd7
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-probes.c
-@@ -0,0 +1,164 @@
-+/*
-+ * ltt-probes.c
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Holds LTTng probes registry.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/list.h>
-+#include <linux/mutex.h>
-+#include <linux/seq_file.h>
-+
-+#include "ltt-events.h"
-+
-+static LIST_HEAD(probe_list);
-+static DEFINE_MUTEX(probe_mutex);
-+
-+static
-+const struct lttng_event_desc *find_event(const char *name)
-+{
-+ struct lttng_probe_desc *probe_desc;
-+ int i;
-+
-+ list_for_each_entry(probe_desc, &probe_list, head) {
-+ for (i = 0; i < probe_desc->nr_events; i++) {
-+ if (!strcmp(probe_desc->event_desc[i]->name, name))
-+ return probe_desc->event_desc[i];
-+ }
-+ }
-+ return NULL;
-+}
-+
-+int ltt_probe_register(struct lttng_probe_desc *desc)
-+{
-+ int ret = 0;
-+ int i;
-+
-+ mutex_lock(&probe_mutex);
-+ /*
-+ * TODO: This is O(N^2). Turn into a hash table when probe registration
-+ * overhead becomes an issue.
-+ */
-+ for (i = 0; i < desc->nr_events; i++) {
-+ if (find_event(desc->event_desc[i]->name)) {
-+ ret = -EEXIST;
-+ goto end;
-+ }
-+ }
-+ list_add(&desc->head, &probe_list);
-+end:
-+ mutex_unlock(&probe_mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(ltt_probe_register);
-+
-+void ltt_probe_unregister(struct lttng_probe_desc *desc)
-+{
-+ mutex_lock(&probe_mutex);
-+ list_del(&desc->head);
-+ mutex_unlock(&probe_mutex);
-+}
-+EXPORT_SYMBOL_GPL(ltt_probe_unregister);
-+
-+const struct lttng_event_desc *ltt_event_get(const char *name)
-+{
-+ const struct lttng_event_desc *event;
-+ int ret;
-+
-+ mutex_lock(&probe_mutex);
-+ event = find_event(name);
-+ mutex_unlock(&probe_mutex);
-+ if (!event)
-+ return NULL;
-+ ret = try_module_get(event->owner);
-+ WARN_ON_ONCE(!ret);
-+ return event;
-+}
-+EXPORT_SYMBOL_GPL(ltt_event_get);
-+
-+void ltt_event_put(const struct lttng_event_desc *event)
-+{
-+ module_put(event->owner);
-+}
-+EXPORT_SYMBOL_GPL(ltt_event_put);
-+
-+static
-+void *tp_list_start(struct seq_file *m, loff_t *pos)
-+{
-+ struct lttng_probe_desc *probe_desc;
-+ int iter = 0, i;
-+
-+ mutex_lock(&probe_mutex);
-+ list_for_each_entry(probe_desc, &probe_list, head) {
-+ for (i = 0; i < probe_desc->nr_events; i++) {
-+ if (iter++ >= *pos)
-+ return (void *) probe_desc->event_desc[i];
-+ }
-+ }
-+ /* End of list */
-+ return NULL;
-+}
-+
-+static
-+void *tp_list_next(struct seq_file *m, void *p, loff_t *ppos)
-+{
-+ struct lttng_probe_desc *probe_desc;
-+ int iter = 0, i;
-+
-+ (*ppos)++;
-+ list_for_each_entry(probe_desc, &probe_list, head) {
-+ for (i = 0; i < probe_desc->nr_events; i++) {
-+ if (iter++ >= *ppos)
-+ return (void *) probe_desc->event_desc[i];
-+ }
-+ }
-+ /* End of list */
-+ return NULL;
-+}
-+
-+static
-+void tp_list_stop(struct seq_file *m, void *p)
-+{
-+ mutex_unlock(&probe_mutex);
-+}
-+
-+static
-+int tp_list_show(struct seq_file *m, void *p)
-+{
-+ const struct lttng_event_desc *probe_desc = p;
-+
-+ /*
-+ * Don't export lttng internal events (metadata).
-+ */
-+ if (!strncmp(probe_desc->name, "lttng_", sizeof("lttng_") - 1))
-+ return 0;
-+ seq_printf(m, "event { name = %s; };\n",
-+ probe_desc->name);
-+ return 0;
-+}
-+
-+static
-+const struct seq_operations lttng_tracepoint_list_seq_ops = {
-+ .start = tp_list_start,
-+ .next = tp_list_next,
-+ .stop = tp_list_stop,
-+ .show = tp_list_show,
-+};
-+
-+static
-+int lttng_tracepoint_list_open(struct inode *inode, struct file *file)
-+{
-+ return seq_open(file, &lttng_tracepoint_list_seq_ops);
-+}
-+
-+const struct file_operations lttng_tracepoint_list_fops = {
-+ .owner = THIS_MODULE,
-+ .open = lttng_tracepoint_list_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release,
-+};
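Illustrative sketch only, not part of the removed patch: it shows how a hypothetical probe module might describe one event and feed it into the registry implemented in ltt-probes.c above. The "my_" names and the callback signature are placeholders (real descriptors are generated by the LTTng tracepoint event macros); struct lttng_event_desc, struct lttng_probe_desc and __type_integer() come from ltt-events.h in this same patch.

#include <linux/module.h>
#include "ltt-events.h"
#include "ltt-tracer.h"	/* for ltt_alignof(), used by __type_integer() */

/* Placeholder probe callback; the first argument is the per-event private data. */
static void my_probe_callback(void *__data, int ret);

static const struct lttng_event_field my_fields[] = {
	{
		.name = "ret",
		.type = __type_integer(int, __BYTE_ORDER, 10, none),
	},
};

static const struct lttng_event_desc my_event_desc = {
	.name = "my_subsys_my_event",
	.probe_callback = (void *) &my_probe_callback,
	.fields = my_fields,
	.nr_fields = ARRAY_SIZE(my_fields),
	.owner = THIS_MODULE,
};

static const struct lttng_event_desc *my_event_descs[] = {
	&my_event_desc,
};

static struct lttng_probe_desc my_probe_desc = {
	.event_desc = my_event_descs,
	.nr_events = ARRAY_SIZE(my_event_descs),
};

static int __init my_probe_init(void)
{
	/* Returns -EEXIST if an event with the same name is already registered. */
	return ltt_probe_register(&my_probe_desc);
}
module_init(my_probe_init);

static void __exit my_probe_exit(void)
{
	ltt_probe_unregister(&my_probe_desc);
}
module_exit(my_probe_exit);

MODULE_LICENSE("GPL");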
-diff --git a/drivers/staging/lttng/ltt-tracer-core.h b/drivers/staging/lttng/ltt-tracer-core.h
-new file mode 100644
-index 0000000..5abc432
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-tracer-core.h
-@@ -0,0 +1,28 @@
-+#ifndef LTT_TRACER_CORE_H
-+#define LTT_TRACER_CORE_H
-+
-+/*
-+ * ltt-tracer-core.h
-+ *
-+ * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This contains the core definitions for the Linux Trace Toolkit.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/list.h>
-+#include <linux/percpu.h>
-+
-+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-+/* Align data on its natural alignment */
-+#define RING_BUFFER_ALIGN
-+#endif
-+
-+#include "wrapper/ringbuffer/config.h"
-+
-+struct ltt_session;
-+struct ltt_channel;
-+struct ltt_event;
-+
-+#endif /* LTT_TRACER_CORE_H */
-diff --git a/drivers/staging/lttng/ltt-tracer.h b/drivers/staging/lttng/ltt-tracer.h
-new file mode 100644
-index 0000000..a21c38c
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-tracer.h
-@@ -0,0 +1,67 @@
-+#ifndef _LTT_TRACER_H
-+#define _LTT_TRACER_H
-+
-+/*
-+ * ltt-tracer.h
-+ *
-+ * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This contains the definitions for the Linux Trace Toolkit tracer.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <stdarg.h>
-+#include <linux/types.h>
-+#include <linux/limits.h>
-+#include <linux/list.h>
-+#include <linux/cache.h>
-+#include <linux/timex.h>
-+#include <linux/wait.h>
-+#include <asm/atomic.h>
-+#include <asm/local.h>
-+
-+#include "wrapper/trace-clock.h"
-+#include "ltt-tracer-core.h"
-+#include "ltt-events.h"
-+
-+#define LTTNG_VERSION 0
-+#define LTTNG_PATCHLEVEL 9
-+#define LTTNG_SUBLEVEL 1
-+
-+#ifndef CHAR_BIT
-+#define CHAR_BIT 8
-+#endif
-+
-+/* Number of bytes to log with a read/write event */
-+#define LTT_LOG_RW_SIZE 32L
-+#define LTT_MAX_SMALL_SIZE 0xFFFFU
-+
-+#ifdef RING_BUFFER_ALIGN
-+#define ltt_alignof(type) __alignof__(type)
-+#else
-+#define ltt_alignof(type) 1
-+#endif
-+
-+/* Tracer properties */
-+#define CTF_MAGIC_NUMBER 0xC1FC1FC1
-+#define TSDL_MAGIC_NUMBER 0x75D11D57
-+
-+/* CTF specification version followed */
-+#define CTF_SPEC_MAJOR 1
-+#define CTF_SPEC_MINOR 8
-+
-+/* Tracer major/minor versions */
-+#define CTF_VERSION_MAJOR 0
-+#define CTF_VERSION_MINOR 1
-+
-+/*
-+ * Number of milliseconds to retry before failing metadata writes on buffer full
-+ * condition. (10 seconds)
-+ */
-+#define LTTNG_METADATA_TIMEOUT_MSEC 10000
-+
-+#define LTT_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
-+#define LTT_RFLAG_END (LTT_RFLAG_EXTENDED << 1)
-+
-+#endif /* _LTT_TRACER_H */
---
-1.7.9
-
diff --git a/patches.lttng/0011-lttng-dynamically-selectable-context-information.patch b/patches.lttng/0011-lttng-dynamically-selectable-context-information.patch
deleted file mode 100644
index e10ed06438f..00000000000
--- a/patches.lttng/0011-lttng-dynamically-selectable-context-information.patch
+++ /dev/null
@@ -1,1131 +0,0 @@
-From 6c19da3578bc0ae0d3a65560b7ac7963a35ea79c Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:19 -0500
-Subject: lttng: dynamically selectable context information
-
-Events can be augmented with context information. This is dynamically
-configurable from the command line.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/ltt-context.c | 93 +++++++
- drivers/staging/lttng/lttng-context-nice.c | 68 +++++
- .../staging/lttng/lttng-context-perf-counters.c | 271 ++++++++++++++++++++
- drivers/staging/lttng/lttng-context-pid.c | 68 +++++
- drivers/staging/lttng/lttng-context-ppid.c | 71 +++++
- drivers/staging/lttng/lttng-context-prio.c | 89 +++++++
- drivers/staging/lttng/lttng-context-procname.c | 72 +++++
- drivers/staging/lttng/lttng-context-tid.c | 68 +++++
- drivers/staging/lttng/lttng-context-vpid.c | 74 ++++++
- drivers/staging/lttng/lttng-context-vppid.c | 79 ++++++
- drivers/staging/lttng/lttng-context-vtid.c | 74 ++++++
- 11 files changed, 1027 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/ltt-context.c
- create mode 100644 drivers/staging/lttng/lttng-context-nice.c
- create mode 100644 drivers/staging/lttng/lttng-context-perf-counters.c
- create mode 100644 drivers/staging/lttng/lttng-context-pid.c
- create mode 100644 drivers/staging/lttng/lttng-context-ppid.c
- create mode 100644 drivers/staging/lttng/lttng-context-prio.c
- create mode 100644 drivers/staging/lttng/lttng-context-procname.c
- create mode 100644 drivers/staging/lttng/lttng-context-tid.c
- create mode 100644 drivers/staging/lttng/lttng-context-vpid.c
- create mode 100644 drivers/staging/lttng/lttng-context-vppid.c
- create mode 100644 drivers/staging/lttng/lttng-context-vtid.c
-
-diff --git a/drivers/staging/lttng/ltt-context.c b/drivers/staging/lttng/ltt-context.c
-new file mode 100644
-index 0000000..60ea525
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-context.c
-@@ -0,0 +1,93 @@
-+/*
-+ * ltt-context.c
-+ *
-+ * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng trace/channel/event context management.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/list.h>
-+#include <linux/mutex.h>
-+#include <linux/slab.h>
-+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "ltt-events.h"
-+#include "ltt-tracer.h"
-+
-+int lttng_find_context(struct lttng_ctx *ctx, const char *name)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < ctx->nr_fields; i++) {
-+ /* Skip allocated (but non-initialized) contexts */
-+ if (!ctx->fields[i].event_field.name)
-+ continue;
-+ if (!strcmp(ctx->fields[i].event_field.name, name))
-+ return 1;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_find_context);
-+
-+/*
-+ * Note: as we append context information, the pointer location may change.
-+ */
-+struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
-+{
-+ struct lttng_ctx_field *field;
-+ struct lttng_ctx *ctx;
-+
-+ if (!*ctx_p) {
-+ *ctx_p = kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
-+ if (!*ctx_p)
-+ return NULL;
-+ }
-+ ctx = *ctx_p;
-+ if (ctx->nr_fields + 1 > ctx->allocated_fields) {
-+ struct lttng_ctx_field *new_fields;
-+
-+ ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
-+ new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
-+ if (!new_fields)
-+ return NULL;
-+ if (ctx->fields)
-+ memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
-+ kfree(ctx->fields);
-+ ctx->fields = new_fields;
-+ }
-+ field = &ctx->fields[ctx->nr_fields];
-+ ctx->nr_fields++;
-+ return field;
-+}
-+EXPORT_SYMBOL_GPL(lttng_append_context);
-+
-+/*
-+ * Remove last context field.
-+ */
-+void lttng_remove_context_field(struct lttng_ctx **ctx_p,
-+ struct lttng_ctx_field *field)
-+{
-+ struct lttng_ctx *ctx;
-+
-+ ctx = *ctx_p;
-+ ctx->nr_fields--;
-+ WARN_ON_ONCE(&ctx->fields[ctx->nr_fields] != field);
-+ memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
-+}
-+EXPORT_SYMBOL_GPL(lttng_remove_context_field);
-+
-+void lttng_destroy_context(struct lttng_ctx *ctx)
-+{
-+ int i;
-+
-+ if (!ctx)
-+ return;
-+ for (i = 0; i < ctx->nr_fields; i++) {
-+ if (ctx->fields[i].destroy)
-+ ctx->fields[i].destroy(&ctx->fields[i]);
-+ }
-+ kfree(ctx->fields);
-+ kfree(ctx);
-+}
-diff --git a/drivers/staging/lttng/lttng-context-nice.c b/drivers/staging/lttng/lttng-context-nice.c
-new file mode 100644
-index 0000000..9b99b54
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context-nice.c
-@@ -0,0 +1,68 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng nice context.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include "ltt-events.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+#include "wrapper/vmalloc.h"
-+#include "ltt-tracer.h"
-+
-+static
-+size_t nice_get_size(size_t offset)
-+{
-+ size_t size = 0;
-+
-+ size += lib_ring_buffer_align(offset, ltt_alignof(int));
-+ size += sizeof(int);
-+ return size;
-+}
-+
-+static
-+void nice_record(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan)
-+{
-+ int nice;
-+
-+ nice = task_nice(current);
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(nice));
-+ chan->ops->event_write(ctx, &nice, sizeof(nice));
-+}
-+
-+int lttng_add_nice_to_ctx(struct lttng_ctx **ctx)
-+{
-+ struct lttng_ctx_field *field;
-+
-+ field = lttng_append_context(ctx);
-+ if (!field)
-+ return -ENOMEM;
-+ if (lttng_find_context(*ctx, "nice")) {
-+ lttng_remove_context_field(ctx, field);
-+ return -EEXIST;
-+ }
-+ field->event_field.name = "nice";
-+ field->event_field.type.atype = atype_integer;
-+ field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = ltt_alignof(int) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.signedness = is_signed_type(int);
-+ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
-+ field->event_field.type.u.basic.integer.base = 10;
-+ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
-+ field->get_size = nice_get_size;
-+ field->record = nice_record;
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_add_nice_to_ctx);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit Nice Context");
-diff --git a/drivers/staging/lttng/lttng-context-perf-counters.c b/drivers/staging/lttng/lttng-context-perf-counters.c
-new file mode 100644
-index 0000000..3ae2266
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context-perf-counters.c
-@@ -0,0 +1,271 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng performance monitoring counters (perf-counters) integration module.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/perf_event.h>
-+#include <linux/list.h>
-+#include <linux/string.h>
-+#include "ltt-events.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+#include "wrapper/vmalloc.h"
-+#include "wrapper/perf.h"
-+#include "ltt-tracer.h"
-+
-+static
-+size_t perf_counter_get_size(size_t offset)
-+{
-+ size_t size = 0;
-+
-+ size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
-+ size += sizeof(uint64_t);
-+ return size;
-+}
-+
-+static
-+void perf_counter_record(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan)
-+{
-+ struct perf_event *event;
-+ uint64_t value;
-+
-+ event = field->u.perf_counter->e[ctx->cpu];
-+ if (likely(event)) {
-+ if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
-+ value = 0;
-+ } else {
-+ event->pmu->read(event);
-+ value = local64_read(&event->count);
-+ }
-+ } else {
-+ /*
-+ * Perf chooses not to be clever and not to support enabling a
-+ * perf counter before the cpu is brought up. Therefore, we need
-+ * to support having events coming (e.g. scheduler events)
-+ * before the counter is setup. Write an arbitrary 0 in this
-+ * case.
-+ */
-+ value = 0;
-+ }
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
-+ chan->ops->event_write(ctx, &value, sizeof(value));
-+}
-+
-+#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
-+static
-+void overflow_callback(struct perf_event *event,
-+ struct perf_sample_data *data,
-+ struct pt_regs *regs)
-+{
-+}
-+#else
-+static
-+void overflow_callback(struct perf_event *event, int nmi,
-+ struct perf_sample_data *data,
-+ struct pt_regs *regs)
-+{
-+}
-+#endif
-+
-+static
-+void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
-+{
-+ struct perf_event **events = field->u.perf_counter->e;
-+ int cpu;
-+
-+ get_online_cpus();
-+ for_each_online_cpu(cpu)
-+ perf_event_release_kernel(events[cpu]);
-+ put_online_cpus();
-+#ifdef CONFIG_HOTPLUG_CPU
-+ unregister_cpu_notifier(&field->u.perf_counter->nb);
-+#endif
-+ kfree(field->event_field.name);
-+ kfree(field->u.perf_counter->attr);
-+ kfree(events);
-+ kfree(field->u.perf_counter);
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+/**
-+ * lttng_perf_counter_hp_callback - CPU hotplug callback
-+ * @nb: notifier block
-+ * @action: hotplug action to take
-+ * @hcpu: CPU number
-+ *
-+ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
-+ *
-+ * We can set up perf counters when the cpu is online (up prepare seems to be too
-+ * soon).
-+ */
-+static
-+int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
-+ unsigned long action,
-+ void *hcpu)
-+{
-+ unsigned int cpu = (unsigned long) hcpu;
-+ struct lttng_perf_counter_field *perf_field =
-+ container_of(nb, struct lttng_perf_counter_field, nb);
-+ struct perf_event **events = perf_field->e;
-+ struct perf_event_attr *attr = perf_field->attr;
-+ struct perf_event *pevent;
-+
-+ if (!perf_field->hp_enable)
-+ return NOTIFY_OK;
-+
-+ switch (action) {
-+ case CPU_ONLINE:
-+ case CPU_ONLINE_FROZEN:
-+ pevent = wrapper_perf_event_create_kernel_counter(attr,
-+ cpu, NULL, overflow_callback);
-+ if (!pevent || IS_ERR(pevent))
-+ return NOTIFY_BAD;
-+ if (pevent->state == PERF_EVENT_STATE_ERROR) {
-+ perf_event_release_kernel(pevent);
-+ return NOTIFY_BAD;
-+ }
-+ barrier(); /* Create perf counter before setting event */
-+ events[cpu] = pevent;
-+ break;
-+ case CPU_UP_CANCELED:
-+ case CPU_UP_CANCELED_FROZEN:
-+ case CPU_DEAD:
-+ case CPU_DEAD_FROZEN:
-+ pevent = events[cpu];
-+ events[cpu] = NULL;
-+ barrier(); /* NULLify event before perf counter teardown */
-+ perf_event_release_kernel(pevent);
-+ break;
-+ }
-+ return NOTIFY_OK;
-+}
-+
-+#endif
-+
-+int lttng_add_perf_counter_to_ctx(uint32_t type,
-+ uint64_t config,
-+ const char *name,
-+ struct lttng_ctx **ctx)
-+{
-+ struct lttng_ctx_field *field;
-+ struct lttng_perf_counter_field *perf_field;
-+ struct perf_event **events;
-+ struct perf_event_attr *attr;
-+ int ret;
-+ int cpu;
-+ char *name_alloc;
-+
-+ events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
-+ if (!events)
-+ return -ENOMEM;
-+
-+ attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
-+ if (!attr) {
-+ ret = -ENOMEM;
-+ goto error_attr;
-+ }
-+
-+ attr->type = type;
-+ attr->config = config;
-+ attr->size = sizeof(struct perf_event_attr);
-+ attr->pinned = 1;
-+ attr->disabled = 0;
-+
-+ perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
-+ if (!perf_field) {
-+ ret = -ENOMEM;
-+ goto error_alloc_perf_field;
-+ }
-+ perf_field->e = events;
-+ perf_field->attr = attr;
-+
-+ name_alloc = kstrdup(name, GFP_KERNEL);
-+ if (!name_alloc) {
-+ ret = -ENOMEM;
-+ goto name_alloc_error;
-+ }
-+
-+ field = lttng_append_context(ctx);
-+ if (!field) {
-+ ret = -ENOMEM;
-+ goto append_context_error;
-+ }
-+ if (lttng_find_context(*ctx, name_alloc)) {
-+ ret = -EEXIST;
-+ goto find_error;
-+ }
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+ perf_field->nb.notifier_call =
-+ lttng_perf_counter_cpu_hp_callback;
-+ perf_field->nb.priority = 0;
-+ register_cpu_notifier(&perf_field->nb);
-+#endif
-+
-+ get_online_cpus();
-+ for_each_online_cpu(cpu) {
-+ events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
-+ cpu, NULL, overflow_callback);
-+ if (!events[cpu] || IS_ERR(events[cpu])) {
-+ ret = -EINVAL;
-+ goto counter_error;
-+ }
-+ if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
-+ ret = -EBUSY;
-+ goto counter_busy;
-+ }
-+ }
-+ put_online_cpus();
-+
-+ field->destroy = lttng_destroy_perf_counter_field;
-+
-+ field->event_field.name = name_alloc;
-+ field->event_field.type.atype = atype_integer;
-+ field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = ltt_alignof(uint64_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.signedness = is_signed_type(uint64_t);
-+ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
-+ field->event_field.type.u.basic.integer.base = 10;
-+ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
-+ field->get_size = perf_counter_get_size;
-+ field->record = perf_counter_record;
-+ field->u.perf_counter = perf_field;
-+ perf_field->hp_enable = 1;
-+
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+
-+counter_busy:
-+counter_error:
-+ for_each_online_cpu(cpu) {
-+ if (events[cpu] && !IS_ERR(events[cpu]))
-+ perf_event_release_kernel(events[cpu]);
-+ }
-+ put_online_cpus();
-+#ifdef CONFIG_HOTPLUG_CPU
-+ unregister_cpu_notifier(&perf_field->nb);
-+#endif
-+find_error:
-+ lttng_remove_context_field(ctx, field);
-+append_context_error:
-+ kfree(name_alloc);
-+name_alloc_error:
-+ kfree(perf_field);
-+error_alloc_perf_field:
-+ kfree(attr);
-+error_attr:
-+ kfree(events);
-+ return ret;
-+}
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");
-diff --git a/drivers/staging/lttng/lttng-context-pid.c b/drivers/staging/lttng/lttng-context-pid.c
-new file mode 100644
-index 0000000..698b242
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context-pid.c
-@@ -0,0 +1,68 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng PID context.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include "ltt-events.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+#include "wrapper/vmalloc.h"
-+#include "ltt-tracer.h"
-+
-+static
-+size_t pid_get_size(size_t offset)
-+{
-+ size_t size = 0;
-+
-+ size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += sizeof(pid_t);
-+ return size;
-+}
-+
-+static
-+void pid_record(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan)
-+{
-+ pid_t pid;
-+
-+ pid = task_tgid_nr(current);
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(pid));
-+ chan->ops->event_write(ctx, &pid, sizeof(pid));
-+}
-+
-+int lttng_add_pid_to_ctx(struct lttng_ctx **ctx)
-+{
-+ struct lttng_ctx_field *field;
-+
-+ field = lttng_append_context(ctx);
-+ if (!field)
-+ return -ENOMEM;
-+ if (lttng_find_context(*ctx, "pid")) {
-+ lttng_remove_context_field(ctx, field);
-+ return -EEXIST;
-+ }
-+ field->event_field.name = "pid";
-+ field->event_field.type.atype = atype_integer;
-+ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
-+ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
-+ field->event_field.type.u.basic.integer.base = 10;
-+ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
-+ field->get_size = pid_get_size;
-+ field->record = pid_record;
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_add_pid_to_ctx);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit PID Context");
-diff --git a/drivers/staging/lttng/lttng-context-ppid.c b/drivers/staging/lttng/lttng-context-ppid.c
-new file mode 100644
-index 0000000..738f7e6
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context-ppid.c
-@@ -0,0 +1,71 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng PPID context.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/syscalls.h>
-+#include "ltt-events.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+#include "wrapper/vmalloc.h"
-+#include "ltt-tracer.h"
-+
-+static
-+size_t ppid_get_size(size_t offset)
-+{
-+ size_t size = 0;
-+
-+ size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += sizeof(pid_t);
-+ return size;
-+}
-+
-+static
-+void ppid_record(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan)
-+{
-+ pid_t ppid;
-+
-+ rcu_read_lock();
-+ ppid = task_tgid_nr(current->real_parent);
-+ rcu_read_unlock();
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(ppid));
-+ chan->ops->event_write(ctx, &ppid, sizeof(ppid));
-+}
-+
-+int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx)
-+{
-+ struct lttng_ctx_field *field;
-+
-+ field = lttng_append_context(ctx);
-+ if (!field)
-+ return -ENOMEM;
-+ if (lttng_find_context(*ctx, "ppid")) {
-+ lttng_remove_context_field(ctx, field);
-+ return -EEXIST;
-+ }
-+ field->event_field.name = "ppid";
-+ field->event_field.type.atype = atype_integer;
-+ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
-+ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
-+ field->event_field.type.u.basic.integer.base = 10;
-+ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
-+ field->get_size = ppid_get_size;
-+ field->record = ppid_record;
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_add_ppid_to_ctx);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit PPID Context");
-diff --git a/drivers/staging/lttng/lttng-context-prio.c b/drivers/staging/lttng/lttng-context-prio.c
-new file mode 100644
-index 0000000..1ee3a54
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context-prio.c
-@@ -0,0 +1,89 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng priority context.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include "ltt-events.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+#include "wrapper/vmalloc.h"
-+#include "wrapper/kallsyms.h"
-+#include "ltt-tracer.h"
-+
-+static
-+int (*wrapper_task_prio_sym)(struct task_struct *t);
-+
-+int wrapper_task_prio_init(void)
-+{
-+ wrapper_task_prio_sym = (void *) kallsyms_lookup_funcptr("task_prio");
-+ if (!wrapper_task_prio_sym) {
-+ printk(KERN_WARNING "LTTng: task_prio symbol lookup failed.\n");
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
-+
-+static
-+size_t prio_get_size(size_t offset)
-+{
-+ size_t size = 0;
-+
-+ size += lib_ring_buffer_align(offset, ltt_alignof(int));
-+ size += sizeof(int);
-+ return size;
-+}
-+
-+static
-+void prio_record(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan)
-+{
-+ int prio;
-+
-+ prio = wrapper_task_prio_sym(current);
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(prio));
-+ chan->ops->event_write(ctx, &prio, sizeof(prio));
-+}
-+
-+int lttng_add_prio_to_ctx(struct lttng_ctx **ctx)
-+{
-+ struct lttng_ctx_field *field;
-+ int ret;
-+
-+ if (!wrapper_task_prio_sym) {
-+ ret = wrapper_task_prio_init();
-+ if (ret)
-+ return ret;
-+ }
-+
-+ field = lttng_append_context(ctx);
-+ if (!field)
-+ return -ENOMEM;
-+ if (lttng_find_context(*ctx, "prio")) {
-+ lttng_remove_context_field(ctx, field);
-+ return -EEXIST;
-+ }
-+ field->event_field.name = "prio";
-+ field->event_field.type.atype = atype_integer;
-+ field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = ltt_alignof(int) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.signedness = is_signed_type(int);
-+ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
-+ field->event_field.type.u.basic.integer.base = 10;
-+ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
-+ field->get_size = prio_get_size;
-+ field->record = prio_record;
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_add_prio_to_ctx);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit Priority Context");
-diff --git a/drivers/staging/lttng/lttng-context-procname.c b/drivers/staging/lttng/lttng-context-procname.c
-new file mode 100644
-index 0000000..c6bc646
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context-procname.c
-@@ -0,0 +1,72 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng procname context.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include "ltt-events.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+#include "wrapper/vmalloc.h"
-+#include "ltt-tracer.h"
-+
-+static
-+size_t procname_get_size(size_t offset)
-+{
-+ size_t size = 0;
-+
-+ size += sizeof(current->comm);
-+ return size;
-+}
-+
-+/*
-+ * Racy read of procname: we simply copy the whole current->comm array.
-+ * This only races with writes to /proc/<task>/procname.
-+ * Taking a mutex for each event would otherwise be cumbersome, and could
-+ * lead to a crash in IRQ context or a deadlock in the lockdep tracer.
-+ */
-+static
-+void procname_record(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan)
-+{
-+ chan->ops->event_write(ctx, current->comm, sizeof(current->comm));
-+}
-+
-+int lttng_add_procname_to_ctx(struct lttng_ctx **ctx)
-+{
-+ struct lttng_ctx_field *field;
-+
-+ field = lttng_append_context(ctx);
-+ if (!field)
-+ return -ENOMEM;
-+ if (lttng_find_context(*ctx, "procname")) {
-+ lttng_remove_context_field(ctx, field);
-+ return -EEXIST;
-+ }
-+ field->event_field.name = "procname";
-+ field->event_field.type.atype = atype_array;
-+ field->event_field.type.u.array.elem_type.atype = atype_integer;
-+ field->event_field.type.u.array.elem_type.u.basic.integer.size = sizeof(char) * CHAR_BIT;
-+ field->event_field.type.u.array.elem_type.u.basic.integer.alignment = ltt_alignof(char) * CHAR_BIT;
-+ field->event_field.type.u.array.elem_type.u.basic.integer.signedness = is_signed_type(char);
-+ field->event_field.type.u.array.elem_type.u.basic.integer.reverse_byte_order = 0;
-+ field->event_field.type.u.array.elem_type.u.basic.integer.base = 10;
-+ field->event_field.type.u.array.elem_type.u.basic.integer.encoding = lttng_encode_UTF8;
-+ field->event_field.type.u.array.length = sizeof(current->comm);
-+
-+ field->get_size = procname_get_size;
-+ field->record = procname_record;
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_add_procname_to_ctx);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");
-diff --git a/drivers/staging/lttng/lttng-context-tid.c b/drivers/staging/lttng/lttng-context-tid.c
-new file mode 100644
-index 0000000..d5ccdb6
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context-tid.c
-@@ -0,0 +1,68 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng TID context.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include "ltt-events.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+#include "wrapper/vmalloc.h"
-+#include "ltt-tracer.h"
-+
-+static
-+size_t tid_get_size(size_t offset)
-+{
-+ size_t size = 0;
-+
-+ size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += sizeof(pid_t);
-+ return size;
-+}
-+
-+static
-+void tid_record(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan)
-+{
-+ pid_t tid;
-+
-+ tid = task_pid_nr(current);
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(tid));
-+ chan->ops->event_write(ctx, &tid, sizeof(tid));
-+}
-+
-+int lttng_add_tid_to_ctx(struct lttng_ctx **ctx)
-+{
-+ struct lttng_ctx_field *field;
-+
-+ field = lttng_append_context(ctx);
-+ if (!field)
-+ return -ENOMEM;
-+ if (lttng_find_context(*ctx, "tid")) {
-+ lttng_remove_context_field(ctx, field);
-+ return -EEXIST;
-+ }
-+ field->event_field.name = "tid";
-+ field->event_field.type.atype = atype_integer;
-+ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
-+ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
-+ field->event_field.type.u.basic.integer.base = 10;
-+ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
-+ field->get_size = tid_get_size;
-+ field->record = tid_record;
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_add_tid_to_ctx);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit TID Context");
-diff --git a/drivers/staging/lttng/lttng-context-vpid.c b/drivers/staging/lttng/lttng-context-vpid.c
-new file mode 100644
-index 0000000..3f16e03
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context-vpid.c
-@@ -0,0 +1,74 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng vPID context.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include "ltt-events.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+#include "wrapper/vmalloc.h"
-+#include "ltt-tracer.h"
-+
-+static
-+size_t vpid_get_size(size_t offset)
-+{
-+ size_t size = 0;
-+
-+ size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += sizeof(pid_t);
-+ return size;
-+}
-+
-+static
-+void vpid_record(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan)
-+{
-+ pid_t vpid;
-+
-+ /*
-+ * nsproxy can be NULL when scheduled out of exit.
-+ */
-+ if (!current->nsproxy)
-+ vpid = 0;
-+ else
-+ vpid = task_tgid_vnr(current);
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(vpid));
-+ chan->ops->event_write(ctx, &vpid, sizeof(vpid));
-+}
-+
-+int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx)
-+{
-+ struct lttng_ctx_field *field;
-+
-+ field = lttng_append_context(ctx);
-+ if (!field)
-+ return -ENOMEM;
-+ if (lttng_find_context(*ctx, "vpid")) {
-+ lttng_remove_context_field(ctx, field);
-+ return -EEXIST;
-+ }
-+ field->event_field.name = "vpid";
-+ field->event_field.type.atype = atype_integer;
-+ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
-+ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
-+ field->event_field.type.u.basic.integer.base = 10;
-+ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
-+ field->get_size = vpid_get_size;
-+ field->record = vpid_record;
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_add_vpid_to_ctx);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit vPID Context");
-diff --git a/drivers/staging/lttng/lttng-context-vppid.c b/drivers/staging/lttng/lttng-context-vppid.c
-new file mode 100644
-index 0000000..f01b020
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context-vppid.c
-@@ -0,0 +1,79 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng vPPID context.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include <linux/syscalls.h>
-+#include "ltt-events.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+#include "wrapper/vmalloc.h"
-+#include "ltt-tracer.h"
-+
-+static
-+size_t vppid_get_size(size_t offset)
-+{
-+ size_t size = 0;
-+
-+ size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += sizeof(pid_t);
-+ return size;
-+}
-+
-+static
-+void vppid_record(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan)
-+{
-+ struct task_struct *parent;
-+ pid_t vppid;
-+
-+ /*
-+ * nsproxy can be NULL when scheduled out of exit.
-+ */
-+ rcu_read_lock();
-+ parent = rcu_dereference(current->real_parent);
-+ if (!parent->nsproxy)
-+ vppid = 0;
-+ else
-+ vppid = task_tgid_vnr(parent);
-+ rcu_read_unlock();
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(vppid));
-+ chan->ops->event_write(ctx, &vppid, sizeof(vppid));
-+}
-+
-+int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx)
-+{
-+ struct lttng_ctx_field *field;
-+
-+ field = lttng_append_context(ctx);
-+ if (!field)
-+ return -ENOMEM;
-+ if (lttng_find_context(*ctx, "vppid")) {
-+ lttng_remove_context_field(ctx, field);
-+ return -EEXIST;
-+ }
-+ field->event_field.name = "vppid";
-+ field->event_field.type.atype = atype_integer;
-+ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
-+ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
-+ field->event_field.type.u.basic.integer.base = 10;
-+ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
-+ field->get_size = vppid_get_size;
-+ field->record = vppid_record;
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_add_vppid_to_ctx);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit vPPID Context");
-diff --git a/drivers/staging/lttng/lttng-context-vtid.c b/drivers/staging/lttng/lttng-context-vtid.c
-new file mode 100644
-index 0000000..264bbb3
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context-vtid.c
-@@ -0,0 +1,74 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng vTID context.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/sched.h>
-+#include "ltt-events.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+#include "wrapper/vmalloc.h"
-+#include "ltt-tracer.h"
-+
-+static
-+size_t vtid_get_size(size_t offset)
-+{
-+ size_t size = 0;
-+
-+ size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += sizeof(pid_t);
-+ return size;
-+}
-+
-+static
-+void vtid_record(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct ltt_channel *chan)
-+{
-+ pid_t vtid;
-+
-+ /*
-+ * nsproxy can be NULL when scheduled out of exit.
-+ */
-+ if (!current->nsproxy)
-+ vtid = 0;
-+ else
-+ vtid = task_pid_vnr(current);
-+ lib_ring_buffer_align_ctx(ctx, ltt_alignof(vtid));
-+ chan->ops->event_write(ctx, &vtid, sizeof(vtid));
-+}
-+
-+int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx)
-+{
-+ struct lttng_ctx_field *field;
-+
-+ field = lttng_append_context(ctx);
-+ if (!field)
-+ return -ENOMEM;
-+ if (lttng_find_context(*ctx, "vtid")) {
-+ lttng_remove_context_field(ctx, field);
-+ return -EEXIST;
-+ }
-+ field->event_field.name = "vtid";
-+ field->event_field.type.atype = atype_integer;
-+ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
-+ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
-+ field->event_field.type.u.basic.integer.base = 10;
-+ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
-+ field->get_size = vtid_get_size;
-+ field->record = vtid_record;
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_add_vtid_to_ctx);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit vTID Context");
---
-1.7.9
-
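All of the lttng-context-*.c files deleted above share one skeleton: append a field to the context, reject duplicate names, describe the field as a CTF integer, and hook up a get_size/record pair. The ABI patch below also wires up lttng_add_nice_to_ctx(), whose source file falls outside this excerpt; the sketch that follows shows how such a context would plug into the same skeleton. It is a reconstruction based on the pattern above, not the verbatim upstream file, and the use of task_nice() here is an assumption.

/* Sketch of a per-task "nice" context, modelled on the removed
 * lttng-context-*.c files above (module boilerplate omitted). */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"

static
size_t nice_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, ltt_alignof(int));
	size += sizeof(int);
	return size;
}

static
void nice_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct ltt_channel *chan)
{
	int nice;

	nice = task_nice(current);
	lib_ring_buffer_align_ctx(ctx, ltt_alignof(nice));
	chan->ops->event_write(ctx, &nice, sizeof(nice));
}

int lttng_add_nice_to_ctx(struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;

	field = lttng_append_context(ctx);
	if (!field)
		return -ENOMEM;
	if (lttng_find_context(*ctx, "nice")) {
		lttng_remove_context_field(ctx, field);
		return -EEXIST;
	}
	field->event_field.name = "nice";
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = ltt_alignof(int) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = is_signed_type(int);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = nice_get_size;
	field->record = nice_record;
	wrapper_vmalloc_sync_all();
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_nice_to_ctx);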
diff --git a/patches.lttng/0012-lttng-timing-calibration-feature.patch b/patches.lttng/0012-lttng-timing-calibration-feature.patch
deleted file mode 100644
index 7dffaf984ad..00000000000
--- a/patches.lttng/0012-lttng-timing-calibration-feature.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From da66e4e541b21b326a26a36de42f400975da60ac Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:20 -0500
-Subject: lttng: timing calibration feature
-
-This calibration feature is fairly limited for now, but provides an
-example of how this can be performed.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/lttng-calibrate.c | 30 ++++++++++++++++++++++++++++++
- 1 files changed, 30 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/lttng-calibrate.c
-
-diff --git a/drivers/staging/lttng/lttng-calibrate.c b/drivers/staging/lttng/lttng-calibrate.c
-new file mode 100644
-index 0000000..07e3c5b
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-calibrate.c
-@@ -0,0 +1,30 @@
-+/*
-+ * lttng-calibrate.c
-+ *
-+ * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng probe calibration.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include "ltt-debugfs-abi.h"
-+#include "ltt-events.h"
-+
-+noinline
-+void lttng_calibrate_kretprobe(void)
-+{
-+ asm volatile ("");
-+}
-+
-+int lttng_calibrate(struct lttng_kernel_calibrate *calibrate)
-+{
-+ switch (calibrate->type) {
-+ case LTTNG_KERNEL_CALIBRATE_KRETPROBE:
-+ lttng_calibrate_kretprobe();
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+ return 0;
-+}
---
-1.7.9
-
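From user space, the calibration hook above is reached through the LTTNG_KERNEL_CALIBRATE ioctl defined in the debugfs/procfs ABI removed by the next hunk. A minimal hypothetical sketch follows; the control-file path and structures are the ones from that ABI patch, the availability of the header in user space is an assumption, and error handling is trimmed.

/* Hypothetical user-space sketch: trigger kretprobe calibration. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "ltt-debugfs-abi.h"	/* assumption: ABI header copied to user space */

int main(void)
{
	struct lttng_kernel_calibrate calibrate = {
		.type = LTTNG_KERNEL_CALIBRATE_KRETPROBE,
	};
	int fd = open("/proc/lttng", O_RDWR);	/* or /sys/kernel/debug/lttng */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, LTTNG_KERNEL_CALIBRATE, &calibrate) < 0)
		perror("LTTNG_KERNEL_CALIBRATE");
	close(fd);
	return 0;
}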
diff --git a/patches.lttng/0013-lttng-debugfs-and-procfs-ABI.patch b/patches.lttng/0013-lttng-debugfs-and-procfs-ABI.patch
deleted file mode 100644
index 9142942f3a3..00000000000
--- a/patches.lttng/0013-lttng-debugfs-and-procfs-ABI.patch
+++ /dev/null
@@ -1,965 +0,0 @@
-From c623c0a3493e87ba60a07eef89dc78274d2e1f4a Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:21 -0500
-Subject: lttng: debugfs and procfs ABI
-
-Add the "lttng" virtual file to debugfs and procfs. All operations are
-performed through ioctls (LTTng ioctl range is already reserved
-upstream) on this virtual file and on anonymous file descriptors
-returned by these ioctls. Each file descriptor is associated with a
-tracer "object" (session, channel, stream, event, context).
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/ltt-debugfs-abi.c | 777 +++++++++++++++++++++++++++++++
- drivers/staging/lttng/ltt-debugfs-abi.h | 153 ++++++
- 2 files changed, 930 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/ltt-debugfs-abi.c
- create mode 100644 drivers/staging/lttng/ltt-debugfs-abi.h
-
-diff --git a/drivers/staging/lttng/ltt-debugfs-abi.c b/drivers/staging/lttng/ltt-debugfs-abi.c
-new file mode 100644
-index 0000000..37cccfa
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-debugfs-abi.c
-@@ -0,0 +1,777 @@
-+/*
-+ * ltt-debugfs-abi.c
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng debugfs ABI
-+ *
-+ * Mimic system calls for:
-+ * - session creation, returns a file descriptor or failure.
-+ * - channel creation, returns a file descriptor or failure.
-+ * - Operates on a session file descriptor
-+ * - Takes all channel options as parameters.
-+ * - stream get, returns a file descriptor or failure.
-+ * - Operates on a channel file descriptor.
-+ * - stream notifier get, returns a file descriptor or failure.
-+ * - Operates on a channel file descriptor.
-+ * - event creation, returns a file descriptor or failure.
-+ * - Operates on a channel file descriptor
-+ * - Takes an event name as parameter
-+ * - Takes an instrumentation source as parameter
-+ * - e.g. tracepoints, dynamic_probes...
-+ * - Takes instrumentation source specific arguments.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/debugfs.h>
-+#include <linux/proc_fs.h>
-+#include <linux/anon_inodes.h>
-+#include <linux/file.h>
-+#include <linux/uaccess.h>
-+#include <linux/slab.h>
-+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "wrapper/ringbuffer/vfs.h"
-+#include "wrapper/poll.h"
-+#include "ltt-debugfs-abi.h"
-+#include "ltt-events.h"
-+#include "ltt-tracer.h"
-+
-+/*
-+ * This is LTTng's own personal way to create a system call as an external
-+ * module. We use ioctl() on /sys/kernel/debug/lttng.
-+ */
-+
-+static struct dentry *lttng_dentry;
-+static struct proc_dir_entry *lttng_proc_dentry;
-+static const struct file_operations lttng_fops;
-+static const struct file_operations lttng_session_fops;
-+static const struct file_operations lttng_channel_fops;
-+static const struct file_operations lttng_metadata_fops;
-+static const struct file_operations lttng_event_fops;
-+
-+/*
-+ * Teardown management: opened file descriptors keep a refcount on the module,
-+ * so it can only exit when all file descriptors are closed.
-+ */
-+
-+enum channel_type {
-+ PER_CPU_CHANNEL,
-+ METADATA_CHANNEL,
-+};
-+
-+static
-+int lttng_abi_create_session(void)
-+{
-+ struct ltt_session *session;
-+ struct file *session_file;
-+ int session_fd, ret;
-+
-+ session = ltt_session_create();
-+ if (!session)
-+ return -ENOMEM;
-+ session_fd = get_unused_fd();
-+ if (session_fd < 0) {
-+ ret = session_fd;
-+ goto fd_error;
-+ }
-+ session_file = anon_inode_getfile("[lttng_session]",
-+ &lttng_session_fops,
-+ session, O_RDWR);
-+ if (IS_ERR(session_file)) {
-+ ret = PTR_ERR(session_file);
-+ goto file_error;
-+ }
-+ session->file = session_file;
-+ fd_install(session_fd, session_file);
-+ return session_fd;
-+
-+file_error:
-+ put_unused_fd(session_fd);
-+fd_error:
-+ ltt_session_destroy(session);
-+ return ret;
-+}
-+
-+static
-+int lttng_abi_tracepoint_list(void)
-+{
-+ struct file *tracepoint_list_file;
-+ int file_fd, ret;
-+
-+ file_fd = get_unused_fd();
-+ if (file_fd < 0) {
-+ ret = file_fd;
-+ goto fd_error;
-+ }
-+
-+ tracepoint_list_file = anon_inode_getfile("[lttng_session]",
-+ &lttng_tracepoint_list_fops,
-+ NULL, O_RDWR);
-+ if (IS_ERR(tracepoint_list_file)) {
-+ ret = PTR_ERR(tracepoint_list_file);
-+ goto file_error;
-+ }
-+ ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
-+ if (ret < 0)
-+ goto open_error;
-+ fd_install(file_fd, tracepoint_list_file);
-+ if (file_fd < 0) {
-+ ret = file_fd;
-+ goto fd_error;
-+ }
-+ return file_fd;
-+
-+open_error:
-+ fput(tracepoint_list_file);
-+file_error:
-+ put_unused_fd(file_fd);
-+fd_error:
-+ return ret;
-+}
-+
-+static
-+long lttng_abi_tracer_version(struct file *file,
-+ struct lttng_kernel_tracer_version __user *uversion_param)
-+{
-+ struct lttng_kernel_tracer_version v;
-+
-+ v.version = LTTNG_VERSION;
-+ v.patchlevel = LTTNG_PATCHLEVEL;
-+ v.sublevel = LTTNG_SUBLEVEL;
-+
-+ if (copy_to_user(uversion_param, &v, sizeof(v)))
-+ return -EFAULT;
-+ return 0;
-+}
-+
-+static
-+long lttng_abi_add_context(struct file *file,
-+ struct lttng_kernel_context __user *ucontext_param,
-+ struct lttng_ctx **ctx, struct ltt_session *session)
-+{
-+ struct lttng_kernel_context context_param;
-+
-+ if (session->been_active)
-+ return -EPERM;
-+
-+ if (copy_from_user(&context_param, ucontext_param, sizeof(context_param)))
-+ return -EFAULT;
-+
-+ switch (context_param.ctx) {
-+ case LTTNG_KERNEL_CONTEXT_PID:
-+ return lttng_add_pid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_PRIO:
-+ return lttng_add_prio_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_NICE:
-+ return lttng_add_nice_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_VPID:
-+ return lttng_add_vpid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_TID:
-+ return lttng_add_tid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_VTID:
-+ return lttng_add_vtid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_PPID:
-+ return lttng_add_ppid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_VPPID:
-+ return lttng_add_vppid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
-+ context_param.u.perf_counter.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ return lttng_add_perf_counter_to_ctx(context_param.u.perf_counter.type,
-+ context_param.u.perf_counter.config,
-+ context_param.u.perf_counter.name,
-+ ctx);
-+ case LTTNG_KERNEL_CONTEXT_PROCNAME:
-+ return lttng_add_procname_to_ctx(ctx);
-+ default:
-+ return -EINVAL;
-+ }
-+}
-+
-+/**
-+ * lttng_ioctl - lttng syscall through ioctl
-+ *
-+ * @file: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements lttng commands:
-+ * LTTNG_KERNEL_SESSION
-+ * Returns a LTTng trace session file descriptor
-+ * LTTNG_KERNEL_TRACER_VERSION
-+ * Returns the LTTng kernel tracer version
-+ * LTTNG_KERNEL_TRACEPOINT_LIST
-+ * Returns a file descriptor listing available tracepoints
-+ * LTTNG_KERNEL_WAIT_QUIESCENT
-+ * Returns after all previously running probes have completed
-+ *
-+ * The returned session will be deleted when its file descriptor is closed.
-+ */
-+static
-+long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ switch (cmd) {
-+ case LTTNG_KERNEL_SESSION:
-+ return lttng_abi_create_session();
-+ case LTTNG_KERNEL_TRACER_VERSION:
-+ return lttng_abi_tracer_version(file,
-+ (struct lttng_kernel_tracer_version __user *) arg);
-+ case LTTNG_KERNEL_TRACEPOINT_LIST:
-+ return lttng_abi_tracepoint_list();
-+ case LTTNG_KERNEL_WAIT_QUIESCENT:
-+ synchronize_trace();
-+ return 0;
-+ case LTTNG_KERNEL_CALIBRATE:
-+ {
-+ struct lttng_kernel_calibrate __user *ucalibrate =
-+ (struct lttng_kernel_calibrate __user *) arg;
-+ struct lttng_kernel_calibrate calibrate;
-+ int ret;
-+
-+ if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
-+ return -EFAULT;
-+ ret = lttng_calibrate(&calibrate);
-+ if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
-+ return -EFAULT;
-+ return ret;
-+ }
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+
-+static const struct file_operations lttng_fops = {
-+ .owner = THIS_MODULE,
-+ .unlocked_ioctl = lttng_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lttng_ioctl,
-+#endif
-+};
-+
-+/*
-+ * We tolerate no failure in this function (if one happens, we print a dmesg
-+ * error, but cannot return any error, because the channel information is
-+ * invariant).
-+ */
-+static
-+void lttng_metadata_create_events(struct file *channel_file)
-+{
-+ struct ltt_channel *channel = channel_file->private_data;
-+ static struct lttng_kernel_event metadata_params = {
-+ .instrumentation = LTTNG_KERNEL_TRACEPOINT,
-+ .name = "lttng_metadata",
-+ };
-+ struct ltt_event *event;
-+
-+ /*
-+ * We tolerate no failure path after event creation. It will stay
-+ * invariant for the rest of the session.
-+ */
-+ event = ltt_event_create(channel, &metadata_params, NULL, NULL);
-+ if (!event) {
-+ goto create_error;
-+ }
-+ return;
-+
-+create_error:
-+ WARN_ON(1);
-+ return; /* not allowed to return error */
-+}
-+
-+static
-+int lttng_abi_create_channel(struct file *session_file,
-+ struct lttng_kernel_channel __user *uchan_param,
-+ enum channel_type channel_type)
-+{
-+ struct ltt_session *session = session_file->private_data;
-+ const struct file_operations *fops = NULL;
-+ const char *transport_name;
-+ struct ltt_channel *chan;
-+ struct file *chan_file;
-+ struct lttng_kernel_channel chan_param;
-+ int chan_fd;
-+ int ret = 0;
-+
-+ if (copy_from_user(&chan_param, uchan_param, sizeof(chan_param)))
-+ return -EFAULT;
-+ chan_fd = get_unused_fd();
-+ if (chan_fd < 0) {
-+ ret = chan_fd;
-+ goto fd_error;
-+ }
-+ switch (channel_type) {
-+ case PER_CPU_CHANNEL:
-+ fops = &lttng_channel_fops;
-+ break;
-+ case METADATA_CHANNEL:
-+ fops = &lttng_metadata_fops;
-+ break;
-+ }
-+
-+ chan_file = anon_inode_getfile("[lttng_channel]",
-+ fops,
-+ NULL, O_RDWR);
-+ if (IS_ERR(chan_file)) {
-+ ret = PTR_ERR(chan_file);
-+ goto file_error;
-+ }
-+ switch (channel_type) {
-+ case PER_CPU_CHANNEL:
-+ if (chan_param.output == LTTNG_KERNEL_SPLICE) {
-+ transport_name = chan_param.overwrite ?
-+ "relay-overwrite" : "relay-discard";
-+ } else if (chan_param.output == LTTNG_KERNEL_MMAP) {
-+ transport_name = chan_param.overwrite ?
-+ "relay-overwrite-mmap" : "relay-discard-mmap";
-+ } else {
-+ return -EINVAL;
-+ }
-+ break;
-+ case METADATA_CHANNEL:
-+ if (chan_param.output == LTTNG_KERNEL_SPLICE)
-+ transport_name = "relay-metadata";
-+ else if (chan_param.output == LTTNG_KERNEL_MMAP)
-+ transport_name = "relay-metadata-mmap";
-+ else
-+ return -EINVAL;
-+ break;
-+ default:
-+ transport_name = "<unknown>";
-+ break;
-+ }
-+ /*
-+ * We tolerate no failure path after channel creation. It will stay
-+ * invariant for the rest of the session.
-+ */
-+ chan = ltt_channel_create(session, transport_name, NULL,
-+ chan_param.subbuf_size,
-+ chan_param.num_subbuf,
-+ chan_param.switch_timer_interval,
-+ chan_param.read_timer_interval);
-+ if (!chan) {
-+ ret = -EINVAL;
-+ goto chan_error;
-+ }
-+ chan->file = chan_file;
-+ chan_file->private_data = chan;
-+ fd_install(chan_fd, chan_file);
-+ if (channel_type == METADATA_CHANNEL) {
-+ session->metadata = chan;
-+ lttng_metadata_create_events(chan_file);
-+ }
-+
-+ /* The channel created holds a reference on the session */
-+ atomic_long_inc(&session_file->f_count);
-+
-+ return chan_fd;
-+
-+chan_error:
-+ fput(chan_file);
-+file_error:
-+ put_unused_fd(chan_fd);
-+fd_error:
-+ return ret;
-+}
-+
-+/**
-+ * lttng_session_ioctl - lttng session fd ioctl
-+ *
-+ * @file: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements lttng commands:
-+ * LTTNG_KERNEL_CHANNEL
-+ * Returns a LTTng channel file descriptor
-+ * LTTNG_KERNEL_ENABLE
-+ * Enables tracing for a session (weak enable)
-+ * LTTNG_KERNEL_DISABLE
-+ * Disables tracing for a session (strong disable)
-+ * LTTNG_KERNEL_METADATA
-+ * Returns a LTTng metadata file descriptor
-+ *
-+ * The returned channel will be deleted when its file descriptor is closed.
-+ */
-+static
-+long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ struct ltt_session *session = file->private_data;
-+
-+ switch (cmd) {
-+ case LTTNG_KERNEL_CHANNEL:
-+ return lttng_abi_create_channel(file,
-+ (struct lttng_kernel_channel __user *) arg,
-+ PER_CPU_CHANNEL);
-+ case LTTNG_KERNEL_SESSION_START:
-+ case LTTNG_KERNEL_ENABLE:
-+ return ltt_session_enable(session);
-+ case LTTNG_KERNEL_SESSION_STOP:
-+ case LTTNG_KERNEL_DISABLE:
-+ return ltt_session_disable(session);
-+ case LTTNG_KERNEL_METADATA:
-+ return lttng_abi_create_channel(file,
-+ (struct lttng_kernel_channel __user *) arg,
-+ METADATA_CHANNEL);
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+
-+/*
-+ * Called when the last file reference is dropped.
-+ *
-+ * Big fat note: channels and events are invariant for the whole session after
-+ * their creation. So this session destruction also destroys all channel and
-+ * event structures specific to this session (they are not destroyed when their
-+ * individual file is released).
-+ */
-+static
-+int lttng_session_release(struct inode *inode, struct file *file)
-+{
-+ struct ltt_session *session = file->private_data;
-+
-+ if (session)
-+ ltt_session_destroy(session);
-+ return 0;
-+}
-+
-+static const struct file_operations lttng_session_fops = {
-+ .owner = THIS_MODULE,
-+ .release = lttng_session_release,
-+ .unlocked_ioctl = lttng_session_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lttng_session_ioctl,
-+#endif
-+};
-+
-+static
-+int lttng_abi_open_stream(struct file *channel_file)
-+{
-+ struct ltt_channel *channel = channel_file->private_data;
-+ struct lib_ring_buffer *buf;
-+ int stream_fd, ret;
-+ struct file *stream_file;
-+
-+ buf = channel->ops->buffer_read_open(channel->chan);
-+ if (!buf)
-+ return -ENOENT;
-+
-+ stream_fd = get_unused_fd();
-+ if (stream_fd < 0) {
-+ ret = stream_fd;
-+ goto fd_error;
-+ }
-+ stream_file = anon_inode_getfile("[lttng_stream]",
-+ &lib_ring_buffer_file_operations,
-+ buf, O_RDWR);
-+ if (IS_ERR(stream_file)) {
-+ ret = PTR_ERR(stream_file);
-+ goto file_error;
-+ }
-+ /*
-+	 * OPEN_FMODE, called within anon_inode_getfile/alloc_file, doesn't honor
-+	 * FMODE_LSEEK, FMODE_PREAD or FMODE_PWRITE. We need to read from this
-+ * file descriptor, so we set FMODE_PREAD here.
-+ */
-+ stream_file->f_mode |= FMODE_PREAD;
-+ fd_install(stream_fd, stream_file);
-+ /*
-+ * The stream holds a reference to the channel within the generic ring
-+ * buffer library, so no need to hold a refcount on the channel and
-+ * session files here.
-+ */
-+ return stream_fd;
-+
-+file_error:
-+ put_unused_fd(stream_fd);
-+fd_error:
-+ channel->ops->buffer_read_close(buf);
-+ return ret;
-+}
-+
-+static
-+int lttng_abi_create_event(struct file *channel_file,
-+ struct lttng_kernel_event __user *uevent_param)
-+{
-+ struct ltt_channel *channel = channel_file->private_data;
-+ struct ltt_event *event;
-+ struct lttng_kernel_event event_param;
-+ int event_fd, ret;
-+ struct file *event_file;
-+
-+ if (copy_from_user(&event_param, uevent_param, sizeof(event_param)))
-+ return -EFAULT;
-+ event_param.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ switch (event_param.instrumentation) {
-+ case LTTNG_KERNEL_KRETPROBE:
-+ event_param.u.kretprobe.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ break;
-+ case LTTNG_KERNEL_KPROBE:
-+ event_param.u.kprobe.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ break;
-+ case LTTNG_KERNEL_FUNCTION:
-+ event_param.u.ftrace.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ break;
-+ default:
-+ break;
-+ }
-+ switch (event_param.instrumentation) {
-+ default:
-+ event_fd = get_unused_fd();
-+ if (event_fd < 0) {
-+ ret = event_fd;
-+ goto fd_error;
-+ }
-+ event_file = anon_inode_getfile("[lttng_event]",
-+ &lttng_event_fops,
-+ NULL, O_RDWR);
-+ if (IS_ERR(event_file)) {
-+ ret = PTR_ERR(event_file);
-+ goto file_error;
-+ }
-+ /*
-+ * We tolerate no failure path after event creation. It
-+ * will stay invariant for the rest of the session.
-+ */
-+ event = ltt_event_create(channel, &event_param, NULL, NULL);
-+ if (!event) {
-+ ret = -EINVAL;
-+ goto event_error;
-+ }
-+ event_file->private_data = event;
-+ fd_install(event_fd, event_file);
-+ /* The event holds a reference on the channel */
-+ atomic_long_inc(&channel_file->f_count);
-+ break;
-+ case LTTNG_KERNEL_SYSCALL:
-+ /*
-+ * Only all-syscall tracing supported for now.
-+ */
-+ if (event_param.name[0] != '\0')
-+ return -EINVAL;
-+ ret = lttng_syscalls_register(channel, NULL);
-+ if (ret)
-+ goto fd_error;
-+ event_fd = 0;
-+ break;
-+ }
-+ return event_fd;
-+
-+event_error:
-+ fput(event_file);
-+file_error:
-+ put_unused_fd(event_fd);
-+fd_error:
-+ return ret;
-+}
-+
-+/**
-+ * lttng_channel_ioctl - lttng syscall through ioctl
-+ *
-+ * @file: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements lttng commands:
-+ * LTTNG_KERNEL_STREAM
-+ * Returns an event stream file descriptor or failure.
-+ * (typically, one event stream records events from one CPU)
-+ * LTTNG_KERNEL_EVENT
-+ * Returns an event file descriptor or failure.
-+ * LTTNG_KERNEL_CONTEXT
-+ * Prepend a context field to each event in the channel
-+ * LTTNG_KERNEL_ENABLE
-+ * Enable recording for events in this channel (weak enable)
-+ * LTTNG_KERNEL_DISABLE
-+ * Disable recording for events in this channel (strong disable)
-+ *
-+ * Channel and event file descriptors also hold a reference on the session.
-+ */
-+static
-+long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ struct ltt_channel *channel = file->private_data;
-+
-+ switch (cmd) {
-+ case LTTNG_KERNEL_STREAM:
-+ return lttng_abi_open_stream(file);
-+ case LTTNG_KERNEL_EVENT:
-+ return lttng_abi_create_event(file, (struct lttng_kernel_event __user *) arg);
-+ case LTTNG_KERNEL_CONTEXT:
-+ return lttng_abi_add_context(file,
-+ (struct lttng_kernel_context __user *) arg,
-+ &channel->ctx, channel->session);
-+ case LTTNG_KERNEL_ENABLE:
-+ return ltt_channel_enable(channel);
-+ case LTTNG_KERNEL_DISABLE:
-+ return ltt_channel_disable(channel);
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+
-+/**
-+ * lttng_metadata_ioctl - lttng syscall through ioctl
-+ *
-+ * @file: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements lttng commands:
-+ * LTTNG_KERNEL_STREAM
-+ * Returns an event stream file descriptor or failure.
-+ *
-+ * Channel and event file descriptors also hold a reference on the session.
-+ */
-+static
-+long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ switch (cmd) {
-+ case LTTNG_KERNEL_STREAM:
-+ return lttng_abi_open_stream(file);
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+
-+/**
-+ * lttng_channel_poll - lttng stream addition/removal monitoring
-+ *
-+ * @file: the file
-+ * @wait: poll table
-+ */
-+unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
-+{
-+ struct ltt_channel *channel = file->private_data;
-+ unsigned int mask = 0;
-+
-+ if (file->f_mode & FMODE_READ) {
-+ poll_wait_set_exclusive(wait);
-+ poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
-+ wait);
-+
-+ if (channel->ops->is_disabled(channel->chan))
-+ return POLLERR;
-+ if (channel->ops->is_finalized(channel->chan))
-+ return POLLHUP;
-+ if (channel->ops->buffer_has_read_closed_stream(channel->chan))
-+ return POLLIN | POLLRDNORM;
-+ return 0;
-+ }
-+ return mask;
-+
-+}
-+
-+static
-+int lttng_channel_release(struct inode *inode, struct file *file)
-+{
-+ struct ltt_channel *channel = file->private_data;
-+
-+ if (channel)
-+ fput(channel->session->file);
-+ return 0;
-+}
-+
-+static const struct file_operations lttng_channel_fops = {
-+ .owner = THIS_MODULE,
-+ .release = lttng_channel_release,
-+ .poll = lttng_channel_poll,
-+ .unlocked_ioctl = lttng_channel_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lttng_channel_ioctl,
-+#endif
-+};
-+
-+static const struct file_operations lttng_metadata_fops = {
-+ .owner = THIS_MODULE,
-+ .release = lttng_channel_release,
-+ .unlocked_ioctl = lttng_metadata_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lttng_metadata_ioctl,
-+#endif
-+};
-+
-+/**
-+ * lttng_event_ioctl - lttng syscall through ioctl
-+ *
-+ * @file: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements lttng commands:
-+ * LTTNG_KERNEL_CONTEXT
-+ * Prepend a context field to each record of this event
-+ * LTTNG_KERNEL_ENABLE
-+ * Enable recording for this event (weak enable)
-+ * LTTNG_KERNEL_DISABLE
-+ * Disable recording for this event (strong disable)
-+ */
-+static
-+long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ struct ltt_event *event = file->private_data;
-+
-+ switch (cmd) {
-+ case LTTNG_KERNEL_CONTEXT:
-+ return lttng_abi_add_context(file,
-+ (struct lttng_kernel_context __user *) arg,
-+ &event->ctx, event->chan->session);
-+ case LTTNG_KERNEL_ENABLE:
-+ return ltt_event_enable(event);
-+ case LTTNG_KERNEL_DISABLE:
-+ return ltt_event_disable(event);
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+
-+static
-+int lttng_event_release(struct inode *inode, struct file *file)
-+{
-+ struct ltt_event *event = file->private_data;
-+
-+ if (event)
-+ fput(event->chan->file);
-+ return 0;
-+}
-+
-+/* TODO: filter control ioctl */
-+static const struct file_operations lttng_event_fops = {
-+ .owner = THIS_MODULE,
-+ .release = lttng_event_release,
-+ .unlocked_ioctl = lttng_event_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lttng_event_ioctl,
-+#endif
-+};
-+
-+int __init ltt_debugfs_abi_init(void)
-+{
-+ int ret = 0;
-+
-+ wrapper_vmalloc_sync_all();
-+ lttng_dentry = debugfs_create_file("lttng", S_IWUSR, NULL, NULL,
-+ &lttng_fops);
-+ if (IS_ERR(lttng_dentry))
-+ lttng_dentry = NULL;
-+
-+ lttng_proc_dentry = proc_create_data("lttng", S_IWUSR, NULL,
-+ &lttng_fops, NULL);
-+
-+ if (!lttng_dentry && !lttng_proc_dentry) {
-+ printk(KERN_ERR "Error creating LTTng control file\n");
-+ ret = -ENOMEM;
-+ goto error;
-+ }
-+error:
-+ return ret;
-+}
-+
-+void __exit ltt_debugfs_abi_exit(void)
-+{
-+ if (lttng_dentry)
-+ debugfs_remove(lttng_dentry);
-+ if (lttng_proc_dentry)
-+ remove_proc_entry("lttng", NULL);
-+}
-diff --git a/drivers/staging/lttng/ltt-debugfs-abi.h b/drivers/staging/lttng/ltt-debugfs-abi.h
-new file mode 100644
-index 0000000..42bc9fd
---- /dev/null
-+++ b/drivers/staging/lttng/ltt-debugfs-abi.h
-@@ -0,0 +1,153 @@
-+#ifndef _LTT_DEBUGFS_ABI_H
-+#define _LTT_DEBUGFS_ABI_H
-+
-+/*
-+ * ltt-debugfs-abi.h
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng debugfs ABI header
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/fs.h>
-+
-+#define LTTNG_SYM_NAME_LEN 128
-+
-+enum lttng_kernel_instrumentation {
-+ LTTNG_KERNEL_TRACEPOINT = 0,
-+ LTTNG_KERNEL_KPROBE = 1,
-+ LTTNG_KERNEL_FUNCTION = 2,
-+ LTTNG_KERNEL_KRETPROBE = 3,
-+ LTTNG_KERNEL_NOOP = 4, /* not hooked */
-+ LTTNG_KERNEL_SYSCALL = 5,
-+};
-+
-+/*
-+ * LTTng consumer mode
-+ */
-+enum lttng_kernel_output {
-+ LTTNG_KERNEL_SPLICE = 0,
-+ LTTNG_KERNEL_MMAP = 1,
-+};
-+
-+/*
-+ * LTTng DebugFS ABI structures.
-+ */
-+
-+struct lttng_kernel_channel {
-+ int overwrite; /* 1: overwrite, 0: discard */
-+ uint64_t subbuf_size; /* in bytes */
-+ uint64_t num_subbuf;
-+ unsigned int switch_timer_interval; /* usecs */
-+ unsigned int read_timer_interval; /* usecs */
-+ enum lttng_kernel_output output; /* splice, mmap */
-+};
-+
-+struct lttng_kernel_kretprobe {
-+ uint64_t addr;
-+
-+ uint64_t offset;
-+ char symbol_name[LTTNG_SYM_NAME_LEN];
-+};
-+
-+/*
-+ * Either addr is used, or symbol_name and offset.
-+ */
-+struct lttng_kernel_kprobe {
-+ uint64_t addr;
-+
-+ uint64_t offset;
-+ char symbol_name[LTTNG_SYM_NAME_LEN];
-+};
-+
-+struct lttng_kernel_function_tracer {
-+ char symbol_name[LTTNG_SYM_NAME_LEN];
-+};
-+
-+/*
-+ * For syscall tracing, name = '\0' means "enable all".
-+ */
-+struct lttng_kernel_event {
-+ char name[LTTNG_SYM_NAME_LEN]; /* event name */
-+ enum lttng_kernel_instrumentation instrumentation;
-+ /* Per instrumentation type configuration */
-+ union {
-+ struct lttng_kernel_kretprobe kretprobe;
-+ struct lttng_kernel_kprobe kprobe;
-+ struct lttng_kernel_function_tracer ftrace;
-+ } u;
-+};
-+
-+struct lttng_kernel_tracer_version {
-+ uint32_t version;
-+ uint32_t patchlevel;
-+ uint32_t sublevel;
-+};
-+
-+enum lttng_kernel_calibrate_type {
-+ LTTNG_KERNEL_CALIBRATE_KRETPROBE,
-+};
-+
-+struct lttng_kernel_calibrate {
-+ enum lttng_kernel_calibrate_type type; /* type (input) */
-+};
-+
-+enum lttng_kernel_context_type {
-+ LTTNG_KERNEL_CONTEXT_PID = 0,
-+ LTTNG_KERNEL_CONTEXT_PERF_COUNTER = 1,
-+ LTTNG_KERNEL_CONTEXT_PROCNAME = 2,
-+ LTTNG_KERNEL_CONTEXT_PRIO = 3,
-+ LTTNG_KERNEL_CONTEXT_NICE = 4,
-+ LTTNG_KERNEL_CONTEXT_VPID = 5,
-+ LTTNG_KERNEL_CONTEXT_TID = 6,
-+ LTTNG_KERNEL_CONTEXT_VTID = 7,
-+ LTTNG_KERNEL_CONTEXT_PPID = 8,
-+ LTTNG_KERNEL_CONTEXT_VPPID = 9,
-+};
-+
-+struct lttng_kernel_perf_counter_ctx {
-+ uint32_t type;
-+ uint64_t config;
-+ char name[LTTNG_SYM_NAME_LEN];
-+};
-+
-+struct lttng_kernel_context {
-+ enum lttng_kernel_context_type ctx;
-+ union {
-+ struct lttng_kernel_perf_counter_ctx perf_counter;
-+ } u;
-+};
-+
-+/* LTTng file descriptor ioctl */
-+#define LTTNG_KERNEL_SESSION _IO(0xF6, 0x40)
-+#define LTTNG_KERNEL_TRACER_VERSION \
-+ _IOR(0xF6, 0x41, struct lttng_kernel_tracer_version)
-+#define LTTNG_KERNEL_TRACEPOINT_LIST _IO(0xF6, 0x42)
-+#define LTTNG_KERNEL_WAIT_QUIESCENT _IO(0xF6, 0x43)
-+#define LTTNG_KERNEL_CALIBRATE \
-+ _IOWR(0xF6, 0x44, struct lttng_kernel_calibrate)
-+
-+/* Session FD ioctl */
-+#define LTTNG_KERNEL_METADATA \
-+ _IOW(0xF6, 0x50, struct lttng_kernel_channel)
-+#define LTTNG_KERNEL_CHANNEL \
-+ _IOW(0xF6, 0x51, struct lttng_kernel_channel)
-+#define LTTNG_KERNEL_SESSION_START _IO(0xF6, 0x52)
-+#define LTTNG_KERNEL_SESSION_STOP _IO(0xF6, 0x53)
-+
-+/* Channel FD ioctl */
-+#define LTTNG_KERNEL_STREAM _IO(0xF6, 0x60)
-+#define LTTNG_KERNEL_EVENT \
-+ _IOW(0xF6, 0x61, struct lttng_kernel_event)
-+
-+/* Event and Channel FD ioctl */
-+#define LTTNG_KERNEL_CONTEXT \
-+ _IOW(0xF6, 0x70, struct lttng_kernel_context)
-+
-+/* Event, Channel and Session ioctl */
-+#define LTTNG_KERNEL_ENABLE _IO(0xF6, 0x80)
-+#define LTTNG_KERNEL_DISABLE _IO(0xF6, 0x81)
-+
-+#endif /* _LTT_DEBUGFS_ABI_H */
---
-1.7.9
-
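Taken together, the ABI removed above mimics a small family of system calls: the control file yields a session fd, the session fd yields channel (and metadata) fds, and each channel fd yields context, event and stream fds. The hypothetical sketch below strings those ioctls together; structures and ioctl numbers are the ones from the removed ltt-debugfs-abi.h, while the channel sizing, the sched_switch event name, the header-copy assumption, and the absence of error handling or a consumer are illustrative only.

/*
 * Hypothetical user-space sketch of the flow this ABI mimics:
 * control file -> session fd -> channel fds -> context/event/stream fds.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include "ltt-debugfs-abi.h"	/* assumption: ABI header copied to user space */

int start_sched_switch_trace(void)
{
	struct lttng_kernel_channel chan_attr = {
		.overwrite = 0,
		.subbuf_size = 262144,		/* illustrative sizing */
		.num_subbuf = 4,
		.switch_timer_interval = 0,
		.read_timer_interval = 200,
		.output = LTTNG_KERNEL_SPLICE,
	};
	struct lttng_kernel_context ctx_attr = {
		.ctx = LTTNG_KERNEL_CONTEXT_PID,
	};
	struct lttng_kernel_event ev_attr;
	int lttng_fd, session_fd, metadata_fd, channel_fd, event_fd, stream_fd;

	lttng_fd = open("/proc/lttng", O_RDWR);	/* or /sys/kernel/debug/lttng */
	session_fd = ioctl(lttng_fd, LTTNG_KERNEL_SESSION);

	/* Metadata channel: carries the CTF trace description. */
	metadata_fd = ioctl(session_fd, LTTNG_KERNEL_METADATA, &chan_attr);
	channel_fd = ioctl(session_fd, LTTNG_KERNEL_CHANNEL, &chan_attr);

	/* Prepend the pid context to every event recorded in this channel. */
	ioctl(channel_fd, LTTNG_KERNEL_CONTEXT, &ctx_attr);

	memset(&ev_attr, 0, sizeof(ev_attr));
	strcpy(ev_attr.name, "sched_switch");
	ev_attr.instrumentation = LTTNG_KERNEL_TRACEPOINT;
	event_fd = ioctl(channel_fd, LTTNG_KERNEL_EVENT, &ev_attr);

	/* Each call returns one per-CPU stream fd; hand these to a consumer. */
	stream_fd = ioctl(channel_fd, LTTNG_KERNEL_STREAM);
	(void) metadata_fd; (void) event_fd; (void) stream_fd;

	return ioctl(session_fd, LTTNG_KERNEL_SESSION_START);
}

The stream fds are then read with splice() or mmap() depending on the output mode chosen for the channel, which is why lib_ring_buffer_file_operations is installed on them in lttng_abi_open_stream() above.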
diff --git a/patches.lttng/0014-lttng-Add-documentation-and-TODO-files.patch b/patches.lttng/0014-lttng-Add-documentation-and-TODO-files.patch
deleted file mode 100644
index 1d3c7fe2b0a..00000000000
--- a/patches.lttng/0014-lttng-Add-documentation-and-TODO-files.patch
+++ /dev/null
@@ -1,249 +0,0 @@
-From 18b2248a0dcc70284d68ecd095cd2e8451714966 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:22 -0500
-Subject: lttng: Add documentation and TODO files
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/LICENSE | 27 ++++++++
- drivers/staging/lttng/README | 48 ++++++++++++++
- drivers/staging/lttng/TODO | 137 +++++++++++++++++++++++++++++++++++++++++
- 3 files changed, 212 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/LICENSE
- create mode 100644 drivers/staging/lttng/README
- create mode 100644 drivers/staging/lttng/TODO
-
-diff --git a/drivers/staging/lttng/LICENSE b/drivers/staging/lttng/LICENSE
-new file mode 100644
-index 0000000..bb880bf
---- /dev/null
-+++ b/drivers/staging/lttng/LICENSE
-@@ -0,0 +1,27 @@
-+LTTng modules licensing
-+Mathieu Desnoyers
-+June 2, 2011
-+
-+* LGPLv2.1/GPLv2 dual-license
-+
-+The files contained within this package are licensed under
-+LGPLv2.1/GPLv2 dual-license (see lgpl-2.1.txt and gpl-2.0.txt for
-+details), except for files identified by the following sections.
-+
-+* GPLv2 license
-+
-+These files are licensed exclusively under the GPLv2 license. See
-+gpl-2.0.txt for details.
-+
-+lib/ringbuffer/ring_buffer_splice.c
-+lib/ringbuffer/ring_buffer_mmap.c
-+instrumentation/events/mainline/*.h
-+instrumentation/events/lttng-modules/*.h
-+
-+* MIT-style license
-+
-+These files are licensed under an MIT-style license:
-+
-+lib/prio_heap/lttng_prio_heap.h
-+lib/prio_heap/lttng_prio_heap.c
-+lib/bitfield.h
-diff --git a/drivers/staging/lttng/README b/drivers/staging/lttng/README
-new file mode 100644
-index 0000000..a154d6e
---- /dev/null
-+++ b/drivers/staging/lttng/README
-@@ -0,0 +1,48 @@
-+LTTng 2.0 modules
-+
-+Mathieu Desnoyers
-+November 1st, 2011
-+
-+The LTTng 2.0 kernel modules are currently part of the Linux kernel
-+staging tree. They feature (new since LTTng 0.x):
-+
-+- Produces CTF (Common Trace Format) natively,
-+ (http://www.efficios.com/ctf)
-+- Tracepoints, Function tracer, CPU Performance Monitoring Unit (PMU)
-+ counters, kprobes, and kretprobes support,
-+- Integrated interface for both kernel and userspace tracing,
-+- Have the ability to attach "context" information to events in the
-+ trace (e.g. any PMU counter, pid, ppid, tid, comm name, etc).
-+ All the extra information fields to be collected with events are
-+ optional, specified on a per-tracing-session basis (except for
-+ timestamp and event id, which are mandatory).
-+
-+To build and install, you need to select "Staging" modules, and the
-+LTTng kernel tracer.
-+
-+Use lttng-tools to control the tracer. LTTng tools should automatically
-+load the kernel modules when needed. Use Babeltrace to print traces as a
-+human-readable text log. These tools are available at the following URL:
-+http://lttng.org/lttng2.0
-+
-+Please note that the LTTng-UST 2.0 (user-space tracing counterpart of
-+LTTng 2.0) is now ready to be used, but still only available from the
-+git repository.
-+
-+So far, it has been tested on vanilla Linux kernels 2.6.38, 2.6.39 and
-+3.0 (on x86 32/64-bit, and powerpc 32-bit at the moment, build tested on
-+ARM). It should work fine with newer kernels and other architectures,
-+but expect build issues with kernels older than 2.6.36. The clock source
-+currently used is the standard gettimeofday (slower, less scalable and
-+less precise than the LTTng 0.x clocks). Support for LTTng 0.x clocks
-+will be added back soon into LTTng 2.0. Please note that lttng-modules
-+2.0 can build on a Linux kernel patched with the LTTng 0.x patchset, but
-+the lttng-modules 2.0 replace the lttng-modules 0.x, so both tracers
-+cannot be installed at the same time for a given kernel version.
-+
-+* Note about Perf PMU counters support
-+
-+Each PMU counter has its zero value set when it is attached to a context with
-+add-context. Therefore, it is normal that the same counters attached to both the
-+stream context and event context show different values for a given event; what
-+matters is that they increment at the same rate.
-diff --git a/drivers/staging/lttng/TODO b/drivers/staging/lttng/TODO
-new file mode 100644
-index 0000000..3fdc5e6
---- /dev/null
-+++ b/drivers/staging/lttng/TODO
-@@ -0,0 +1,137 @@
-+Please contact Mathieu Desnoyers <mathieu.desnoyers@efficios.com> for
-+questions about this TODO list. The "Cleanup/Testing" section would be
-+good to go through before integration into mainline. The "Features"
-+section is a wish list of features to complete before releasing the
-+"LTTng 2.0" final version, but are not required to have LTTng working.
-+These features are mostly performance enhancements and instrumentation
-+enhancements.
-+
-+TODO:
-+
-+A) Cleanup/Testing
-+
-+ 1) Remove debugfs "lttng" file (keep only procfs "lttng" file).
-+ The rationale for this is that this file is needed for
-+ user-level tracing support (LTTng-UST 2.0) intended to be
-+	used on production systems, and therefore should be present as
-+ part of a "usually mounted" filesystem rather than a debug
-+ filesystem.
-+
-+ 2) Cleanup wrappers. The drivers/staging/lttng/wrapper directory
-+ contains various wrapper headers that use kallsyms lookups to
-+ work around some missing EXPORT_SYMBOL_GPL() in the mainline
-+ kernel. Ideally, those few symbols should become exported to
-+ modules by the kernel.
-+
-+ 3) Test lib ring buffer snapshot feature.
-+ When working on the lttngtop project, Julien Desfossez
-+ reported that he needed to push the consumer position
-+	forward explicitly with lib_ring_buffer_put_next_subbuf.
-+ This means that although the usual case of pairs of
-+ lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf
-+ work fine, there is probably a problem that needs to be
-+	works fine, there is probably a problem that needs to be
-+ lib_ring_buffer_get_subbuf/lib_ring_buffer_put_subbuf, which
-+ depend on the producer to push the reader position.
-+ Contact: Julien Desfossez <julien.desfossez@polymtl.ca>
-+
-+ 4) Test latest -rt kernel support.
-+ There has been report of corrupted traces when tracing a
-+ 3.0.10-rt27 in the area of access_ok() system call event.
-+ Still has to be investigated. Cannot be reproduced with
-+ mainline kernel.
-+ Contact: Yannick Brosseau <yannick.brosseau@polymtl.ca>
-+
-+B) Features
-+
-+ 1) Integration of the LTTng 0.x trace clocks into
-+ LTTng 2.0.
-+ Currently using mainline kernel monotonic clock. NMIs can
-+ therefore not be traced, and this causes a significant
-+ performance degradation compared to the LTTng 0.x trace
-+ clocks. Imply the creation of drivers/staging/lttng/arch to
-+	clocks. This implies the creation of drivers/staging/lttng/arch to
-+ * Dependency: addition of clock descriptions to CTF.
-+ See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
-+ for the LTTng 0.x git tree.
-+
-+ 2) Port OMAP3 LTTng trace clocks to x86 to support systems
-+ without constant TSC.
-+ * Dependency: (B.1)
-+ See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
-+ for the LTTng 0.x git tree.
-+
-+ 3) Implement mmap operation on an anonymous file created by a
-+	LTTNG_KERNEL_CLOCK ioctl to export
-+ synchronized kernel and user-level LTTng trace clocks:
-+ with:
-+ - shared per-cpu data,
-+ - read seqlock.
-+ The content exported by this shared memory area will be
-+ arch-specific.
-+ * Dependency: (B.1) && (B.2)
-+ See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
-+ for the LTTng 0.x git tree, which has vDSO support for
-+ LTTng trace clock on the x86 architecture.
-+
-+	4) Integrate the "statedump" module from LTTng 0.x into LTTng
-+ 2.0.
-+ * Dependency: addition of "dynamic enumerations" type to CTF.
-+ See: http://git.lttng.org/?p=lttng-modules.git;a=shortlog;h=refs/heads/v0.19-stable
-+ ltt-statedump.c
-+
-+	5) Generate system call TRACE_EVENT headers for all
-+ architectures (currently done: x86 32/64).
-+
-+	6) Define "unknown" system calls into instrumentation/syscalls
-+ override files / or do SYSCALL_DEFINE improvements to
-+ mainline kernel to allow automatic generation of these
-+ missing system call descriptions.
-+
-+	7) Create missing tracepoint event header files into
-+ instrumentation/events from headers located in
-+ include/trace/events/. Choice: either do as currently done,
-+ and copy those headers locally into the lttng driver and
-+ perform the modifications locally, or push TRACE_EVENT API
-+ modification into mainline headers, which would require
-+ collaboration from Ftrace/Perf maintainers.
-+
-+	8) Poll: implement a poll and/or epoll exclusive wakeup scheme,
-+	   which contradicts POSIX, but protects multiple consumer
-+	   threads from the thundering herd effect.
-+
-+	9) Re-integrate sample modules from libringbuffer into
-+ lttng driver. Those modules can be used as example of how to
-+ use libringbuffer in other contexts than LTTng, and are
-+ useful to perform benchmarks of the ringbuffer library.
-+ See: http://www.efficios.com/ringbuffer
-+
-+	10) NOHZ support for lib ring buffer. NOHZ infrastructure in the
-+	   Linux kernel does not support notifier chains, which does
-+ not let LTTng play nicely with low power consumption setups
-+ for flight recorder (overwrite mode) live traces. One way to
-+ allow integration between NOHZ and LTTng would be to add
-+ support for such notifiers into NOHZ kernel infrastructure.
-+
-+	11) Turn drivers/staging/lttng/ltt-probes.c probe_list into a
-+ hash table. Turns O(n^2) trace systems registration (cost
-+ for n systems) into O(n). (O(1) per system)
-+
-+	12) drivers/staging/lttng/probes/lttng-ftrace.c:
-+ LTTng currently uses kretprobes for per-function tracing,
-+ not the function tracer. So lttng-ftrace.c should be used
-+ for "all" function tracing.
-+
-+	13) drivers/staging/lttng/probes/lttng-types.c:
-+ This is a currently unused placeholder to export entire C
-+ type declarations into the trace metadata, e.g. for support
-+ of describing the layout of structures/enumeration mapping
-+ along with syscall entry events. The design of this support
-+ will likely change though, and become integrated with the
-+ TRACE_EVENT support within lttng, by adding new macros, and
-+ support for generation of metadata from these macros, to
-+ allow description of those compound types/enumerations.
-+
-+Please send patches
-+To: Greg Kroah-Hartman <greg@kroah.com>
-+To: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---
-1.7.9
-
diff --git a/patches.lttng/0015-lttng-add-system-call-instrumentation-probe.patch b/patches.lttng/0015-lttng-add-system-call-instrumentation-probe.patch
deleted file mode 100644
index f38581e1a7f..00000000000
--- a/patches.lttng/0015-lttng-add-system-call-instrumentation-probe.patch
+++ /dev/null
@@ -1,459 +0,0 @@
-From 97104e24fbefa7081e4c9aa9bff3c4fa1a0212cf Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:23 -0500
-Subject: lttng: add system call instrumentation probe
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/lttng-syscalls.c | 438 ++++++++++++++++++++++++++++++++
- 1 files changed, 438 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/lttng-syscalls.c
-
-diff --git a/drivers/staging/lttng/lttng-syscalls.c b/drivers/staging/lttng/lttng-syscalls.c
-new file mode 100644
-index 0000000..16624a7f7
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-syscalls.c
-@@ -0,0 +1,438 @@
-+/*
-+ * lttng-syscalls.c
-+ *
-+ * Copyright 2010-2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng syscall probes.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/compat.h>
-+#include <asm/ptrace.h>
-+#include <asm/syscall.h>
-+
-+#include "ltt-events.h"
-+
-+#ifndef CONFIG_COMPAT
-+static inline int is_compat_task(void)
-+{
-+ return 0;
-+}
-+#endif
-+
-+static
-+void syscall_entry_probe(void *__data, struct pt_regs *regs, long id);
-+
-+/*
-+ * Take care of NOARGS, which is not supported by mainline.
-+ */
-+#define DECLARE_EVENT_CLASS_NOARGS(name, tstruct, assign, print)
-+#define DEFINE_EVENT_NOARGS(template, name)
-+#define TRACE_EVENT_NOARGS(name, struct, assign, print)
-+
-+/*
-+ * Create LTTng tracepoint probes.
-+ */
-+#define LTTNG_PACKAGE_BUILD
-+#define CREATE_TRACE_POINTS
-+#define TP_MODULE_OVERRIDE
-+#define TRACE_INCLUDE_PATH ../instrumentation/syscalls/headers
-+
-+#define PARAMS(args...) args
-+
-+#undef TRACE_SYSTEM
-+
-+/* Hijack probe callback for system calls */
-+#undef TP_PROBE_CB
-+#define TP_PROBE_CB(_template) &syscall_entry_probe
-+#define SC_TRACE_EVENT(_name, _proto, _args, _struct, _assign, _printk) \
-+ TRACE_EVENT(_name, PARAMS(_proto), PARAMS(_args),\
-+ PARAMS(_struct), PARAMS(_assign), PARAMS(_printk))
-+#define SC_DECLARE_EVENT_CLASS_NOARGS(_name, _struct, _assign, _printk) \
-+ DECLARE_EVENT_CLASS_NOARGS(_name, PARAMS(_struct), PARAMS(_assign),\
-+ PARAMS(_printk))
-+#define SC_DEFINE_EVENT_NOARGS(_template, _name) \
-+ DEFINE_EVENT_NOARGS(_template, _name)
-+#define TRACE_SYSTEM syscalls_integers
-+#include "instrumentation/syscalls/headers/syscalls_integers.h"
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM syscalls_pointers
-+#include "instrumentation/syscalls/headers/syscalls_pointers.h"
-+#undef TRACE_SYSTEM
-+#undef SC_TRACE_EVENT
-+#undef SC_DECLARE_EVENT_CLASS_NOARGS
-+#undef SC_DEFINE_EVENT_NOARGS
-+
-+#define TRACE_SYSTEM syscalls_unknown
-+#include "instrumentation/syscalls/headers/syscalls_unknown.h"
-+#undef TRACE_SYSTEM
-+
-+/* For compat syscalls */
-+#undef _TRACE_SYSCALLS_integers_H
-+#undef _TRACE_SYSCALLS_pointers_H
-+
-+/* Hijack probe callback for system calls */
-+#undef TP_PROBE_CB
-+#define TP_PROBE_CB(_template) &syscall_entry_probe
-+#define SC_TRACE_EVENT(_name, _proto, _args, _struct, _assign, _printk) \
-+ TRACE_EVENT(compat_##_name, PARAMS(_proto), PARAMS(_args), \
-+ PARAMS(_struct), PARAMS(_assign), \
-+ PARAMS(_printk))
-+#define SC_DECLARE_EVENT_CLASS_NOARGS(_name, _struct, _assign, _printk) \
-+ DECLARE_EVENT_CLASS_NOARGS(compat_##_name, PARAMS(_struct), \
-+ PARAMS(_assign), PARAMS(_printk))
-+#define SC_DEFINE_EVENT_NOARGS(_template, _name) \
-+ DEFINE_EVENT_NOARGS(compat_##_template, compat_##_name)
-+#define TRACE_SYSTEM compat_syscalls_integers
-+#include "instrumentation/syscalls/headers/compat_syscalls_integers.h"
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM compat_syscalls_pointers
-+#include "instrumentation/syscalls/headers/compat_syscalls_pointers.h"
-+#undef TRACE_SYSTEM
-+#undef SC_TRACE_EVENT
-+#undef SC_DECLARE_EVENT_CLASS_NOARGS
-+#undef SC_DEFINE_EVENT_NOARGS
-+#undef TP_PROBE_CB
-+
-+#undef TP_MODULE_OVERRIDE
-+#undef LTTNG_PACKAGE_BUILD
-+#undef CREATE_TRACE_POINTS
-+
-+struct trace_syscall_entry {
-+ void *func;
-+ const struct lttng_event_desc *desc;
-+ const struct lttng_event_field *fields;
-+ unsigned int nrargs;
-+};
-+
-+#define CREATE_SYSCALL_TABLE
-+
-+#undef TRACE_SYSCALL_TABLE
-+#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
-+ [ _nr ] = { \
-+ .func = __event_probe__##_template, \
-+ .nrargs = (_nrargs), \
-+ .fields = __event_fields___##_template, \
-+ .desc = &__event_desc___##_name, \
-+ },
-+
-+static const struct trace_syscall_entry sc_table[] = {
-+#include "instrumentation/syscalls/headers/syscalls_integers.h"
-+#include "instrumentation/syscalls/headers/syscalls_pointers.h"
-+};
-+
-+#undef TRACE_SYSCALL_TABLE
-+#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
-+ [ _nr ] = { \
-+ .func = __event_probe__##compat_##_template, \
-+ .nrargs = (_nrargs), \
-+ .fields = __event_fields___##compat_##_template,\
-+ .desc = &__event_desc___##compat_##_name, \
-+ },
-+
-+/* Create compatibility syscall table */
-+const struct trace_syscall_entry compat_sc_table[] = {
-+#include "instrumentation/syscalls/headers/compat_syscalls_integers.h"
-+#include "instrumentation/syscalls/headers/compat_syscalls_pointers.h"
-+};
-+
-+#undef CREATE_SYSCALL_TABLE
-+
-+static void syscall_entry_unknown(struct ltt_event *event,
-+ struct pt_regs *regs, unsigned int id)
-+{
-+ unsigned long args[UNKNOWN_SYSCALL_NRARGS];
-+
-+ syscall_get_arguments(current, regs, 0, UNKNOWN_SYSCALL_NRARGS, args);
-+ if (unlikely(is_compat_task()))
-+ __event_probe__compat_sys_unknown(event, id, args);
-+ else
-+ __event_probe__sys_unknown(event, id, args);
-+}
-+
-+void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
-+{
-+ struct ltt_channel *chan = __data;
-+ struct ltt_event *event, *unknown_event;
-+ const struct trace_syscall_entry *table, *entry;
-+ size_t table_len;
-+
-+ if (unlikely(is_compat_task())) {
-+ table = compat_sc_table;
-+ table_len = ARRAY_SIZE(compat_sc_table);
-+ unknown_event = chan->sc_compat_unknown;
-+ } else {
-+ table = sc_table;
-+ table_len = ARRAY_SIZE(sc_table);
-+ unknown_event = chan->sc_unknown;
-+ }
-+ if (unlikely(id >= table_len)) {
-+ syscall_entry_unknown(unknown_event, regs, id);
-+ return;
-+ }
-+ if (unlikely(is_compat_task()))
-+ event = chan->compat_sc_table[id];
-+ else
-+ event = chan->sc_table[id];
-+ if (unlikely(!event)) {
-+ syscall_entry_unknown(unknown_event, regs, id);
-+ return;
-+ }
-+ entry = &table[id];
-+ WARN_ON_ONCE(!entry);
-+
-+ switch (entry->nrargs) {
-+ case 0:
-+ {
-+ void (*fptr)(void *__data) = entry->func;
-+
-+ fptr(event);
-+ break;
-+ }
-+ case 1:
-+ {
-+ void (*fptr)(void *__data, unsigned long arg0) = entry->func;
-+ unsigned long args[1];
-+
-+ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
-+ fptr(event, args[0]);
-+ break;
-+ }
-+ case 2:
-+ {
-+ void (*fptr)(void *__data,
-+ unsigned long arg0,
-+ unsigned long arg1) = entry->func;
-+ unsigned long args[2];
-+
-+ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
-+ fptr(event, args[0], args[1]);
-+ break;
-+ }
-+ case 3:
-+ {
-+ void (*fptr)(void *__data,
-+ unsigned long arg0,
-+ unsigned long arg1,
-+ unsigned long arg2) = entry->func;
-+ unsigned long args[3];
-+
-+ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
-+ fptr(event, args[0], args[1], args[2]);
-+ break;
-+ }
-+ case 4:
-+ {
-+ void (*fptr)(void *__data,
-+ unsigned long arg0,
-+ unsigned long arg1,
-+ unsigned long arg2,
-+ unsigned long arg3) = entry->func;
-+ unsigned long args[4];
-+
-+ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
-+ fptr(event, args[0], args[1], args[2], args[3]);
-+ break;
-+ }
-+ case 5:
-+ {
-+ void (*fptr)(void *__data,
-+ unsigned long arg0,
-+ unsigned long arg1,
-+ unsigned long arg2,
-+ unsigned long arg3,
-+ unsigned long arg4) = entry->func;
-+ unsigned long args[5];
-+
-+ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
-+ fptr(event, args[0], args[1], args[2], args[3], args[4]);
-+ break;
-+ }
-+ case 6:
-+ {
-+ void (*fptr)(void *__data,
-+ unsigned long arg0,
-+ unsigned long arg1,
-+ unsigned long arg2,
-+ unsigned long arg3,
-+ unsigned long arg4,
-+ unsigned long arg5) = entry->func;
-+ unsigned long args[6];
-+
-+ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
-+ fptr(event, args[0], args[1], args[2],
-+ args[3], args[4], args[5]);
-+ break;
-+ }
-+ default:
-+ break;
-+ }
-+}
-+
-+/* noinline to diminish caller stack size */
-+static
-+int fill_table(const struct trace_syscall_entry *table, size_t table_len,
-+ struct ltt_event **chan_table, struct ltt_channel *chan, void *filter)
-+{
-+ const struct lttng_event_desc *desc;
-+ unsigned int i;
-+
-+ /* Allocate events for each syscall, insert into table */
-+ for (i = 0; i < table_len; i++) {
-+ struct lttng_kernel_event ev;
-+ desc = table[i].desc;
-+
-+ if (!desc) {
-+ /* Unknown syscall */
-+ continue;
-+ }
-+ /*
-+ * Skip entries already populated by a previous failed
-+ * registration attempt for this channel.
-+ */
-+ if (chan_table[i])
-+ continue;
-+ memset(&ev, 0, sizeof(ev));
-+ strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
-+ ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ ev.instrumentation = LTTNG_KERNEL_NOOP;
-+ chan_table[i] = ltt_event_create(chan, &ev, filter,
-+ desc);
-+ if (!chan_table[i]) {
-+ /*
-+ * If something goes wrong in event registration
-+ * after the first one, we have no choice but to
-+ * leave the previous events in there, until
-+ * deleted by session teardown.
-+ */
-+ return -EINVAL;
-+ }
-+ }
-+ return 0;
-+}
-+
-+int lttng_syscalls_register(struct ltt_channel *chan, void *filter)
-+{
-+ struct lttng_kernel_event ev;
-+ int ret;
-+
-+ wrapper_vmalloc_sync_all();
-+
-+ if (!chan->sc_table) {
-+ /* create syscall table mapping syscall to events */
-+ chan->sc_table = kzalloc(sizeof(struct ltt_event *)
-+ * ARRAY_SIZE(sc_table), GFP_KERNEL);
-+ if (!chan->sc_table)
-+ return -ENOMEM;
-+ }
-+
-+#ifdef CONFIG_COMPAT
-+ if (!chan->compat_sc_table) {
-+ /* create syscall table mapping compat syscall to events */
-+ chan->compat_sc_table = kzalloc(sizeof(struct ltt_event *)
-+ * ARRAY_SIZE(compat_sc_table), GFP_KERNEL);
-+ if (!chan->compat_sc_table)
-+ return -ENOMEM;
-+ }
-+#endif
-+ if (!chan->sc_unknown) {
-+ const struct lttng_event_desc *desc =
-+ &__event_desc___sys_unknown;
-+
-+ memset(&ev, 0, sizeof(ev));
-+ strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
-+ ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ ev.instrumentation = LTTNG_KERNEL_NOOP;
-+ chan->sc_unknown = ltt_event_create(chan, &ev, filter,
-+ desc);
-+ if (!chan->sc_unknown) {
-+ return -EINVAL;
-+ }
-+ }
-+
-+ if (!chan->sc_compat_unknown) {
-+ const struct lttng_event_desc *desc =
-+ &__event_desc___compat_sys_unknown;
-+
-+ memset(&ev, 0, sizeof(ev));
-+ strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
-+ ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ ev.instrumentation = LTTNG_KERNEL_NOOP;
-+ chan->sc_compat_unknown = ltt_event_create(chan, &ev, filter,
-+ desc);
-+ if (!chan->sc_compat_unknown) {
-+ return -EINVAL;
-+ }
-+ }
-+
-+ if (!chan->sc_exit) {
-+ const struct lttng_event_desc *desc =
-+ &__event_desc___exit_syscall;
-+
-+ memset(&ev, 0, sizeof(ev));
-+ strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
-+ ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ ev.instrumentation = LTTNG_KERNEL_NOOP;
-+ chan->sc_exit = ltt_event_create(chan, &ev, filter,
-+ desc);
-+ if (!chan->sc_exit) {
-+ return -EINVAL;
-+ }
-+ }
-+
-+ ret = fill_table(sc_table, ARRAY_SIZE(sc_table),
-+ chan->sc_table, chan, filter);
-+ if (ret)
-+ return ret;
-+#ifdef CONFIG_COMPAT
-+ ret = fill_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
-+ chan->compat_sc_table, chan, filter);
-+ if (ret)
-+ return ret;
-+#endif
-+ ret = tracepoint_probe_register("sys_enter",
-+ (void *) syscall_entry_probe, chan);
-+ if (ret)
-+ return ret;
-+ /*
-+ * We change the name of the sys_exit tracepoint due to a namespace
-+ * conflict with the sys_exit syscall entry event.
-+ */
-+ ret = tracepoint_probe_register("sys_exit",
-+ (void *) __event_probe__exit_syscall,
-+ chan->sc_exit);
-+ if (ret) {
-+ WARN_ON_ONCE(tracepoint_probe_unregister("sys_enter",
-+ (void *) syscall_entry_probe, chan));
-+ }
-+ return ret;
-+}
-+
-+/*
-+ * Only called at session destruction.
-+ */
-+int lttng_syscalls_unregister(struct ltt_channel *chan)
-+{
-+ int ret;
-+
-+ if (!chan->sc_table)
-+ return 0;
-+ ret = tracepoint_probe_unregister("sys_exit",
-+ (void *) __event_probe__exit_syscall,
-+ chan->sc_exit);
-+ if (ret)
-+ return ret;
-+ ret = tracepoint_probe_unregister("sys_enter",
-+ (void *) syscall_entry_probe, chan);
-+ if (ret)
-+ return ret;
-+ /* ltt_event destroy will be performed by ltt_session_destroy() */
-+ kfree(chan->sc_table);
-+#ifdef CONFIG_COMPAT
-+ kfree(chan->compat_sc_table);
-+#endif
-+ return 0;
-+}
---
-1.7.9
-
diff --git a/patches.lttng/0016-lttng-probe-callbacks.patch b/patches.lttng/0016-lttng-probe-callbacks.patch
deleted file mode 100644
index 60ce75cd78d..00000000000
--- a/patches.lttng/0016-lttng-probe-callbacks.patch
+++ /dev/null
@@ -1,2035 +0,0 @@
-From 1e8ab70d74ed14bc287e2cb98145e860e2d95f6e Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:24 -0500
-Subject: lttng: probe callbacks
-
-Implement the LTTng probe callbacks. One notable file here is
-lttng-events.h, which is the core implementation of the LTTng
-TRACE_EVENT macros for generating probes and tracepoint descriptions
-from the TRACE_EVENT declarations.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/probes/Makefile | 37 ++
- drivers/staging/lttng/probes/define_trace.h | 132 ++++
- drivers/staging/lttng/probes/lttng-events-reset.h | 84 +++
- drivers/staging/lttng/probes/lttng-events.h | 703 +++++++++++++++++++++
- drivers/staging/lttng/probes/lttng-ftrace.c | 188 ++++++
- drivers/staging/lttng/probes/lttng-kprobes.c | 164 +++++
- drivers/staging/lttng/probes/lttng-kretprobes.c | 277 ++++++++
- drivers/staging/lttng/probes/lttng-probe-block.c | 31 +
- drivers/staging/lttng/probes/lttng-probe-irq.c | 31 +
- drivers/staging/lttng/probes/lttng-probe-kvm.c | 31 +
- drivers/staging/lttng/probes/lttng-probe-lttng.c | 24 +
- drivers/staging/lttng/probes/lttng-probe-sched.c | 30 +
- drivers/staging/lttng/probes/lttng-type-list.h | 21 +
- drivers/staging/lttng/probes/lttng-types.c | 49 ++
- drivers/staging/lttng/probes/lttng-types.h | 72 +++
- drivers/staging/lttng/probes/lttng.h | 15 +
- 16 files changed, 1889 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/probes/Makefile
- create mode 100644 drivers/staging/lttng/probes/define_trace.h
- create mode 100644 drivers/staging/lttng/probes/lttng-events-reset.h
- create mode 100644 drivers/staging/lttng/probes/lttng-events.h
- create mode 100644 drivers/staging/lttng/probes/lttng-ftrace.c
- create mode 100644 drivers/staging/lttng/probes/lttng-kprobes.c
- create mode 100644 drivers/staging/lttng/probes/lttng-kretprobes.c
- create mode 100644 drivers/staging/lttng/probes/lttng-probe-block.c
- create mode 100644 drivers/staging/lttng/probes/lttng-probe-irq.c
- create mode 100644 drivers/staging/lttng/probes/lttng-probe-kvm.c
- create mode 100644 drivers/staging/lttng/probes/lttng-probe-lttng.c
- create mode 100644 drivers/staging/lttng/probes/lttng-probe-sched.c
- create mode 100644 drivers/staging/lttng/probes/lttng-type-list.h
- create mode 100644 drivers/staging/lttng/probes/lttng-types.c
- create mode 100644 drivers/staging/lttng/probes/lttng-types.h
- create mode 100644 drivers/staging/lttng/probes/lttng.h
-
-diff --git a/drivers/staging/lttng/probes/Makefile b/drivers/staging/lttng/probes/Makefile
-new file mode 100644
-index 0000000..bdc1179
---- /dev/null
-+++ b/drivers/staging/lttng/probes/Makefile
-@@ -0,0 +1,37 @@
-+#
-+# Makefile for the LTT probes.
-+#
-+
-+ccflags-y += -I$(PWD)/probes
-+obj-m += lttng-types.o
-+
-+obj-m += lttng-probe-lttng.o
-+
-+obj-m += lttng-probe-sched.o
-+obj-m += lttng-probe-irq.o
-+
-+ifneq ($(CONFIG_KVM),)
-+obj-m += lttng-probe-kvm.o
-+endif
-+
-+ifneq ($(CONFIG_BLOCK),)
-+ifneq ($(CONFIG_EVENT_TRACING),) # need blk_cmd_buf_len
-+obj-m += $(shell \
-+ if [ $(VERSION) -ge 3 \
-+ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 38 \) ] ; then \
-+ echo "lttng-probe-block.o" ; fi;)
-+endif
-+endif
-+
-+ifneq ($(CONFIG_KPROBES),)
-+obj-m += lttng-kprobes.o
-+endif
-+
-+
-+ifneq ($(CONFIG_KRETPROBES),)
-+obj-m += lttng-kretprobes.o
-+endif
-+
-+ifneq ($(CONFIG_DYNAMIC_FTRACE),)
-+obj-m += lttng-ftrace.o
-+endif
-diff --git a/drivers/staging/lttng/probes/define_trace.h b/drivers/staging/lttng/probes/define_trace.h
-new file mode 100644
-index 0000000..3c9a467
---- /dev/null
-+++ b/drivers/staging/lttng/probes/define_trace.h
-@@ -0,0 +1,132 @@
-+/*
-+ * define_trace.h
-+ *
-+ * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
-+ * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+/*
-+ * Trace files that want to automate creation of all tracepoints defined
-+ * in their file should include this file. The following are macros that the
-+ * trace file may define:
-+ *
-+ * TRACE_SYSTEM defines the system the tracepoint is for
-+ *
-+ * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h
-+ * This macro may be defined to tell define_trace.h what file to include.
-+ * Note, leave off the ".h".
-+ *
-+ * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace
-+ * then this macro can define the path to use. Note, the path is relative to
-+ * define_trace.h, not the file including it. Full path names for out of tree
-+ * modules must be used.
-+ */
-+
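-+/*
-+ * Example (illustrative sketch; "sample" and "mydir" are hypothetical
-+ * names): a trace header defining TRACE_SYSTEM sample ends with an
-+ * include of this file, and the module instantiating its tracepoints
-+ * does, before including that trace header:
-+ *
-+ *   #define CREATE_TRACE_POINTS
-+ *   #define TRACE_INCLUDE_PATH ../instrumentation/events/mydir
-+ *   #include "../instrumentation/events/mydir/sample.h"
-+ */
-+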
-+#ifdef CREATE_TRACE_POINTS
-+
-+/* Prevent recursion */
-+#undef CREATE_TRACE_POINTS
-+
-+#include <linux/stringify.h>
-+/*
-+ * module.h includes tracepoints, and because ftrace.h
-+ * pulls in module.h:
-+ * trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
-+ * linux/ftrace.h -> linux/module.h
-+ * we must include module.h here before we play with any of
-+ * the TRACE_EVENT() macros, otherwise the tracepoints included
-+ * by module.h may break the build.
-+ */
-+#include <linux/module.h>
-+
-+#undef TRACE_EVENT
-+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
-+ DEFINE_TRACE(name)
-+
-+#undef TRACE_EVENT_CONDITION
-+#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
-+ TRACE_EVENT(name, \
-+ PARAMS(proto), \
-+ PARAMS(args), \
-+ PARAMS(tstruct), \
-+ PARAMS(assign), \
-+ PARAMS(print))
-+
-+#undef TRACE_EVENT_FN
-+#define TRACE_EVENT_FN(name, proto, args, tstruct, \
-+ assign, print, reg, unreg) \
-+ DEFINE_TRACE_FN(name, reg, unreg)
-+
-+#undef DEFINE_EVENT
-+#define DEFINE_EVENT(template, name, proto, args) \
-+ DEFINE_TRACE(name)
-+
-+#undef DEFINE_EVENT_PRINT
-+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-+ DEFINE_TRACE(name)
-+
-+#undef DEFINE_EVENT_CONDITION
-+#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \
-+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-+
-+#undef DECLARE_TRACE
-+#define DECLARE_TRACE(name, proto, args) \
-+ DEFINE_TRACE(name)
-+
-+#undef TRACE_INCLUDE
-+#undef __TRACE_INCLUDE
-+
-+#ifndef TRACE_INCLUDE_FILE
-+# define TRACE_INCLUDE_FILE TRACE_SYSTEM
-+# define UNDEF_TRACE_INCLUDE_FILE
-+#endif
-+
-+#ifndef TRACE_INCLUDE_PATH
-+# define __TRACE_INCLUDE(system) <trace/events/system.h>
-+# define UNDEF_TRACE_INCLUDE_PATH
-+#else
-+# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h)
-+#endif
-+
-+# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
-+
-+/* Let the trace headers be reread */
-+#define TRACE_HEADER_MULTI_READ
-+
-+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-+
-+/* Make all open coded DECLARE_TRACE nops */
-+#undef DECLARE_TRACE
-+#define DECLARE_TRACE(name, proto, args)
-+
-+#ifdef LTTNG_PACKAGE_BUILD
-+#include "lttng-events.h"
-+#endif
-+
-+#undef TRACE_EVENT
-+#undef TRACE_EVENT_FN
-+#undef TRACE_EVENT_CONDITION
-+#undef DECLARE_EVENT_CLASS
-+#undef DEFINE_EVENT
-+#undef DEFINE_EVENT_PRINT
-+#undef DEFINE_EVENT_CONDITION
-+#undef TRACE_HEADER_MULTI_READ
-+#undef DECLARE_TRACE
-+
-+/* Only undef what we defined in this file */
-+#ifdef UNDEF_TRACE_INCLUDE_FILE
-+# undef TRACE_INCLUDE_FILE
-+# undef UNDEF_TRACE_INCLUDE_FILE
-+#endif
-+
-+#ifdef UNDEF_TRACE_INCLUDE_PATH
-+# undef TRACE_INCLUDE_PATH
-+# undef UNDEF_TRACE_INCLUDE_PATH
-+#endif
-+
-+/* We may be processing more files */
-+#define CREATE_TRACE_POINTS
-+
-+#endif /* CREATE_TRACE_POINTS */
-diff --git a/drivers/staging/lttng/probes/lttng-events-reset.h b/drivers/staging/lttng/probes/lttng-events-reset.h
-new file mode 100644
-index 0000000..c8a1046
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-events-reset.h
-@@ -0,0 +1,84 @@
-+/*
-+ * lttng-events-reset.h
-+ *
-+ * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+/* Reset macros used within TRACE_EVENT to "nothing" */
-+
-+#undef __field_full
-+#define __field_full(_type, _item, _order, _base)
-+
-+#undef __array_enc_ext
-+#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)
-+
-+#undef __dynamic_array_enc_ext
-+#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)
-+
-+#undef __dynamic_array_len
-+#define __dynamic_array_len(_type, _item, _length)
-+
-+#undef __string
-+#define __string(_item, _src)
-+
-+#undef tp_assign
-+#define tp_assign(dest, src)
-+
-+#undef tp_memcpy
-+#define tp_memcpy(dest, src, len)
-+
-+#undef tp_memcpy_dyn
-+#define tp_memcpy_dyn(dest, src, len)
-+
-+#undef tp_strcpy
-+#define tp_strcpy(dest, src)
-+
-+#undef __get_str
-+#define __get_str(field)
-+
-+#undef __get_dynamic_array
-+#define __get_dynamic_array(field)
-+
-+#undef __get_dynamic_array_len
-+#define __get_dynamic_array_len(field)
-+
-+#undef TP_PROTO
-+#define TP_PROTO(args...)
-+
-+#undef TP_ARGS
-+#define TP_ARGS(args...)
-+
-+#undef TP_STRUCT__entry
-+#define TP_STRUCT__entry(args...)
-+
-+#undef TP_fast_assign
-+#define TP_fast_assign(args...)
-+
-+#undef __perf_count
-+#define __perf_count(args...)
-+
-+#undef __perf_addr
-+#define __perf_addr(args...)
-+
-+#undef TP_perf_assign
-+#define TP_perf_assign(args...)
-+
-+#undef TP_printk
-+#define TP_printk(args...)
-+
-+#undef DECLARE_EVENT_CLASS
-+#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print)
-+
-+#undef DECLARE_EVENT_CLASS_NOARGS
-+#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print)
-+
-+#undef DEFINE_EVENT
-+#define DEFINE_EVENT(_template, _name, _proto, _args)
-+
-+#undef DEFINE_EVENT_NOARGS
-+#define DEFINE_EVENT_NOARGS(_template, _name)
-+
-+#undef TRACE_EVENT_FLAGS
-+#define TRACE_EVENT_FLAGS(name, value)
-diff --git a/drivers/staging/lttng/probes/lttng-events.h b/drivers/staging/lttng/probes/lttng-events.h
-new file mode 100644
-index 0000000..ff6273f
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-events.h
-@@ -0,0 +1,703 @@
-+/*
-+ * lttng-events.h
-+ *
-+ * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
-+ * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/debugfs.h>
-+#include "lttng.h"
-+#include "lttng-types.h"
-+#include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "../wrapper/ringbuffer/frontend_types.h"
-+#include "../ltt-events.h"
-+#include "../ltt-tracer-core.h"
-+
-+/*
-+ * Macro declarations used for all stages.
-+ */
-+
-+/*
-+ * DECLARE_EVENT_CLASS can be used to add a generic function
-+ * handler for events, i.e. when all events have the same
-+ * parameters and just have distinct tracepoints.
-+ * Each tracepoint can be defined with DEFINE_EVENT and that
-+ * will map the DECLARE_EVENT_CLASS to the tracepoint.
-+ *
-+ * TRACE_EVENT is a one to one mapping between tracepoint and template.
-+ */
-+
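-+/*
-+ * For instance (illustrative sketch, borrowing names from the mainline
-+ * sched instrumentation), several wakeup events can share one class:
-+ *
-+ *   DECLARE_EVENT_CLASS(sched_wakeup_template, ...);
-+ *   DEFINE_EVENT(sched_wakeup_template, sched_wakeup, ...);
-+ *   DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, ...);
-+ *
-+ * whereas TRACE_EVENT(name, ...) expands to a class plus a single
-+ * DEFINE_EVENT of the same name (see below).
-+ */
-+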
-+#undef TRACE_EVENT
-+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
-+ DECLARE_EVENT_CLASS(name, \
-+ PARAMS(proto), \
-+ PARAMS(args), \
-+ PARAMS(tstruct), \
-+ PARAMS(assign), \
-+ PARAMS(print)) \
-+ DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))
-+
-+#undef TRACE_EVENT_NOARGS
-+#define TRACE_EVENT_NOARGS(name, tstruct, assign, print) \
-+ DECLARE_EVENT_CLASS_NOARGS(name, \
-+ PARAMS(tstruct), \
-+ PARAMS(assign), \
-+ PARAMS(print)) \
-+ DEFINE_EVENT_NOARGS(name, name)
-+
-+
-+#undef DEFINE_EVENT_PRINT
-+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-+ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-+
-+/* Callbacks are meaningless to LTTng. */
-+#undef TRACE_EVENT_FN
-+#define TRACE_EVENT_FN(name, proto, args, tstruct, \
-+ assign, print, reg, unreg) \
-+ TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
-+ PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
-+
-+/*
-+ * Stage 1 of the trace events.
-+ *
-+ * Create dummy trace calls for each event, verifying that the LTTng module
-+ * TRACE_EVENT headers match the kernel arguments. Will be optimized out by the
-+ * compiler.
-+ */
-+
-+#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
-+
-+#undef TP_PROTO
-+#define TP_PROTO(args...) args
-+
-+#undef TP_ARGS
-+#define TP_ARGS(args...) args
-+
-+#undef DEFINE_EVENT
-+#define DEFINE_EVENT(_template, _name, _proto, _args) \
-+void trace_##_name(_proto);
-+
-+#undef DEFINE_EVENT_NOARGS
-+#define DEFINE_EVENT_NOARGS(_template, _name) \
-+void trace_##_name(void *__data);
-+
-+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-+
-+/*
-+ * Stage 2 of the trace events.
-+ *
-+ * Create event field type metadata section.
-+ * Each event produces an array of fields.
-+ */
-+
-+#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
-+
-+/* Named field types must be defined in lttng-types.h */
-+
-+#undef __field_full
-+#define __field_full(_type, _item, _order, _base) \
-+ { \
-+ .name = #_item, \
-+ .type = __type_integer(_type, _order, _base, none), \
-+ },
-+
-+#undef __field
-+#define __field(_type, _item) \
-+ __field_full(_type, _item, __BYTE_ORDER, 10)
-+
-+#undef __field_ext
-+#define __field_ext(_type, _item, _filter_type) \
-+ __field(_type, _item)
-+
-+#undef __field_hex
-+#define __field_hex(_type, _item) \
-+ __field_full(_type, _item, __BYTE_ORDER, 16)
-+
-+#undef __field_network
-+#define __field_network(_type, _item) \
-+ __field_full(_type, _item, __BIG_ENDIAN, 10)
-+
-+#undef __field_network_hex
-+#define __field_network_hex(_type, _item) \
-+ __field_full(_type, _item, __BIG_ENDIAN, 16)
-+
-+#undef __array_enc_ext
-+#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
-+ { \
-+ .name = #_item, \
-+ .type = \
-+ { \
-+ .atype = atype_array, \
-+ .u.array = \
-+ { \
-+ .length = _length, \
-+ .elem_type = __type_integer(_type, _order, _base, _encoding), \
-+ }, \
-+ }, \
-+ },
-+
-+#undef __array
-+#define __array(_type, _item, _length) \
-+ __array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, none)
-+
-+#undef __array_text
-+#define __array_text(_type, _item, _length) \
-+ __array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, UTF8)
-+
-+#undef __array_hex
-+#define __array_hex(_type, _item, _length) \
-+ __array_enc_ext(_type, _item, _length, __BYTE_ORDER, 16, none)
-+
-+#undef __dynamic_array_enc_ext
-+#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
-+ { \
-+ .name = #_item, \
-+ .type = \
-+ { \
-+ .atype = atype_sequence, \
-+ .u.sequence = \
-+ { \
-+ .length_type = __type_integer(u32, __BYTE_ORDER, 10, none), \
-+ .elem_type = __type_integer(_type, _order, _base, _encoding), \
-+ }, \
-+ }, \
-+ },
-+
-+#undef __dynamic_array
-+#define __dynamic_array(_type, _item, _length) \
-+ __dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, none)
-+
-+#undef __dynamic_array_text
-+#define __dynamic_array_text(_type, _item, _length) \
-+ __dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, UTF8)
-+
-+#undef __dynamic_array_hex
-+#define __dynamic_array_hex(_type, _item, _length) \
-+ __dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 16, none)
-+
-+#undef __string
-+#define __string(_item, _src) \
-+ { \
-+ .name = #_item, \
-+ .type = \
-+ { \
-+ .atype = atype_string, \
-+ .u.basic.string.encoding = lttng_encode_UTF8, \
-+ }, \
-+ },
-+
-+#undef __string_from_user
-+#define __string_from_user(_item, _src) \
-+ __string(_item, _src)
-+
-+#undef TP_STRUCT__entry
-+#define TP_STRUCT__entry(args...) args /* Only one used in this phase */
-+
-+#undef DECLARE_EVENT_CLASS_NOARGS
-+#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
-+ static const struct lttng_event_field __event_fields___##_name[] = { \
-+ _tstruct \
-+ };
-+
-+#undef DECLARE_EVENT_CLASS
-+#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
-+ DECLARE_EVENT_CLASS_NOARGS(_name, PARAMS(_tstruct), PARAMS(_assign), \
-+ PARAMS(_print))
-+
-+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-+
-+/*
-+ * Stage 3 of the trace events.
-+ *
-+ * Create probe callback prototypes.
-+ */
-+
-+#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
-+
-+#undef TP_PROTO
-+#define TP_PROTO(args...) args
-+
-+#undef DECLARE_EVENT_CLASS
-+#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
-+static void __event_probe__##_name(void *__data, _proto);
-+
-+#undef DECLARE_EVENT_CLASS_NOARGS
-+#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
-+static void __event_probe__##_name(void *__data);
-+
-+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-+
-+/*
-+ * Stage 3.9 of the trace events.
-+ *
-+ * Create event descriptions.
-+ */
-+
-+/* Named field types must be defined in lttng-types.h */
-+
-+#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
-+
-+#ifndef TP_PROBE_CB
-+#define TP_PROBE_CB(_template) &__event_probe__##_template
-+#endif
-+
-+#undef DEFINE_EVENT_NOARGS
-+#define DEFINE_EVENT_NOARGS(_template, _name) \
-+static const struct lttng_event_desc __event_desc___##_name = { \
-+ .fields = __event_fields___##_template, \
-+ .name = #_name, \
-+ .probe_callback = (void *) TP_PROBE_CB(_template), \
-+ .nr_fields = ARRAY_SIZE(__event_fields___##_template), \
-+ .owner = THIS_MODULE, \
-+};
-+
-+#undef DEFINE_EVENT
-+#define DEFINE_EVENT(_template, _name, _proto, _args) \
-+ DEFINE_EVENT_NOARGS(_template, _name)
-+
-+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-+
-+
-+/*
-+ * Stage 4 of the trace events.
-+ *
-+ * Create an array of event description pointers.
-+ */
-+
-+/* Named field types must be defined in lttng-types.h */
-+
-+#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
-+
-+#undef DEFINE_EVENT_NOARGS
-+#define DEFINE_EVENT_NOARGS(_template, _name) \
-+ &__event_desc___##_name,
-+
-+#undef DEFINE_EVENT
-+#define DEFINE_EVENT(_template, _name, _proto, _args) \
-+ DEFINE_EVENT_NOARGS(_template, _name)
-+
-+#define TP_ID1(_token, _system) _token##_system
-+#define TP_ID(_token, _system) TP_ID1(_token, _system)
-+
-+static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
-+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-+};
-+
-+#undef TP_ID1
-+#undef TP_ID
-+
-+
-+/*
-+ * Stage 5 of the trace events.
-+ *
-+ * Create a toplevel descriptor for the whole probe.
-+ */
-+
-+#define TP_ID1(_token, _system) _token##_system
-+#define TP_ID(_token, _system) TP_ID1(_token, _system)
-+
-+/* non-const because list head will be modified when registered. */
-+static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
-+ .event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
-+ .nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
-+};
-+
-+#undef TP_ID1
-+#undef TP_ID
-+
-+/*
-+ * Stage 6 of the trace events.
-+ *
-+ * Create static inline function that calculates event size.
-+ */
-+
-+#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
-+
-+/* Named field types must be defined in lttng-types.h */
-+
-+#undef __field_full
-+#define __field_full(_type, _item, _order, _base) \
-+ __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
-+ __event_len += sizeof(_type);
-+
-+#undef __array_enc_ext
-+#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
-+ __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
-+ __event_len += sizeof(_type) * (_length);
-+
-+#undef __dynamic_array_enc_ext
-+#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
-+ __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(u32)); \
-+ __event_len += sizeof(u32); \
-+ __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
-+ __dynamic_len[__dynamic_len_idx] = (_length); \
-+ __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
-+ __dynamic_len_idx++;
-+
-+#undef __string
-+#define __string(_item, _src) \
-+ __event_len += __dynamic_len[__dynamic_len_idx++] = strlen(_src) + 1;
-+
-+/*
-+ * strlen_user includes \0. If it returns 0, it faulted, so we set the
-+ * size to 1 (\0 only).
-+ */
-+#undef __string_from_user
-+#define __string_from_user(_item, _src) \
-+ __event_len += __dynamic_len[__dynamic_len_idx++] = \
-+ max_t(size_t, strlen_user(_src), 1);
-+
-+#undef TP_PROTO
-+#define TP_PROTO(args...) args
-+
-+#undef TP_STRUCT__entry
-+#define TP_STRUCT__entry(args...) args
-+
-+#undef DECLARE_EVENT_CLASS
-+#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
-+static inline size_t __event_get_size__##_name(size_t *__dynamic_len, _proto) \
-+{ \
-+ size_t __event_len = 0; \
-+ unsigned int __dynamic_len_idx = 0; \
-+ \
-+ if (0) \
-+ (void) __dynamic_len_idx; /* don't warn if unused */ \
-+ _tstruct \
-+ return __event_len; \
-+}
-+
-+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-+
-+/*
-+ * Stage 7 of the trace events.
-+ *
-+ * Create static inline function that calculates event payload alignment.
-+ */
-+
-+#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
-+
-+/* Named field types must be defined in lttng-types.h */
-+
-+#undef __field_full
-+#define __field_full(_type, _item, _order, _base) \
-+ __event_align = max_t(size_t, __event_align, ltt_alignof(_type));
-+
-+#undef __array_enc_ext
-+#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
-+ __event_align = max_t(size_t, __event_align, ltt_alignof(_type));
-+
-+#undef __dynamic_array_enc_ext
-+#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
-+ __event_align = max_t(size_t, __event_align, ltt_alignof(u32)); \
-+ __event_align = max_t(size_t, __event_align, ltt_alignof(_type));
-+
-+#undef __string
-+#define __string(_item, _src)
-+
-+#undef __string_from_user
-+#define __string_from_user(_item, _src)
-+
-+#undef TP_PROTO
-+#define TP_PROTO(args...) args
-+
-+#undef TP_STRUCT__entry
-+#define TP_STRUCT__entry(args...) args
-+
-+#undef DECLARE_EVENT_CLASS
-+#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
-+static inline size_t __event_get_align__##_name(_proto) \
-+{ \
-+ size_t __event_align = 1; \
-+ _tstruct \
-+ return __event_align; \
-+}
-+
-+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-+
-+
-+/*
-+ * Stage 8 of the trace events.
-+ *
-+ * Create structure declaration that allows the "assign" macros to access the
-+ * field types.
-+ */
-+
-+#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
-+
-+/* Named field types must be defined in lttng-types.h */
-+
-+#undef __field_full
-+#define __field_full(_type, _item, _order, _base) _type _item;
-+
-+#undef __array_enc_ext
-+#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
-+ _type _item;
-+
-+#undef __dynamic_array_enc_ext
-+#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
-+ _type _item;
-+
-+#undef __string
-+#define __string(_item, _src) char _item;
-+
-+#undef __string_from_user
-+#define __string_from_user(_item, _src) \
-+ __string(_item, _src)
-+
-+#undef TP_STRUCT__entry
-+#define TP_STRUCT__entry(args...) args
-+
-+#undef DECLARE_EVENT_CLASS
-+#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
-+struct __event_typemap__##_name { \
-+ _tstruct \
-+};
-+
-+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-+
-+
-+/*
-+ * Stage 9 of the trace events.
-+ *
-+ * Create the probe function: call the event size calculation and write the
-+ * event data into the buffer.
-+ *
-+ * We use both the field and assignment macros to write the fields in the order
-+ * defined in the field declaration. The field declarations control the
-+ * execution order, jumping to the appropriate assignment block.
-+ */
-+
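-+/*
-+ * Illustrative sketch: for a single __field(int, foo) assigned with
-+ * tp_assign(foo, bar), the probe body generated below is roughly:
-+ *
-+ *     goto __assign_foo;          (control code, from _tstruct)
-+ *   __end_field_foo:
-+ *     ...commit event, return...
-+ *   __assign_foo:                 (copy code, from _assign)
-+ *     ...write bar into the ring buffer...
-+ *     goto __end_field_foo;
-+ */
-+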
-+#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
-+
-+#undef __field_full
-+#define __field_full(_type, _item, _order, _base) \
-+ goto __assign_##_item; \
-+__end_field_##_item:
-+
-+#undef __array_enc_ext
-+#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
-+ goto __assign_##_item; \
-+__end_field_##_item:
-+
-+#undef __dynamic_array_enc_ext
-+#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
-+ goto __assign_##_item##_1; \
-+__end_field_##_item##_1: \
-+ goto __assign_##_item##_2; \
-+__end_field_##_item##_2:
-+
-+#undef __string
-+#define __string(_item, _src) \
-+ goto __assign_##_item; \
-+__end_field_##_item:
-+
-+#undef __string_from_user
-+#define __string_from_user(_item, _src) \
-+ __string(_item, _src)
-+
-+/*
-+ * Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy() to
-+ * strcpy().
-+ */
-+#undef tp_assign
-+#define tp_assign(dest, src) \
-+__assign_##dest: \
-+ { \
-+ __typeof__(__typemap.dest) __tmp = (src); \
-+ lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__tmp)); \
-+ __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
-+ } \
-+ goto __end_field_##dest;
-+
-+#undef tp_memcpy
-+#define tp_memcpy(dest, src, len) \
-+__assign_##dest: \
-+ if (0) \
-+ (void) __typemap.dest; \
-+ lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
-+ __chan->ops->event_write(&__ctx, src, len); \
-+ goto __end_field_##dest;
-+
-+#undef tp_memcpy_dyn
-+#define tp_memcpy_dyn(dest, src) \
-+__assign_##dest##_1: \
-+ { \
-+ u32 __tmpl = __dynamic_len[__dynamic_len_idx]; \
-+ lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(u32)); \
-+ __chan->ops->event_write(&__ctx, &__tmpl, sizeof(u32)); \
-+ } \
-+ goto __end_field_##dest##_1; \
-+__assign_##dest##_2: \
-+ lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
-+ __chan->ops->event_write(&__ctx, src, \
-+ sizeof(__typemap.dest) * __get_dynamic_array_len(dest));\
-+ goto __end_field_##dest##_2;
-+
-+#undef tp_memcpy_from_user
-+#define tp_memcpy_from_user(dest, src, len) \
-+ __assign_##dest: \
-+ if (0) \
-+ (void) __typemap.dest; \
-+ lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
-+ __chan->ops->event_write_from_user(&__ctx, src, len); \
-+ goto __end_field_##dest;
-+
-+/*
-+ * The string length including the final \0.
-+ */
-+#undef tp_copy_string_from_user
-+#define tp_copy_string_from_user(dest, src) \
-+ __assign_##dest: \
-+ { \
-+ size_t __ustrlen; \
-+ \
-+ if (0) \
-+ (void) __typemap.dest; \
-+ lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest));\
-+ __ustrlen = __get_dynamic_array_len(dest); \
-+ if (likely(__ustrlen > 1)) { \
-+ __chan->ops->event_write_from_user(&__ctx, src, \
-+ __ustrlen - 1); \
-+ } \
-+ __chan->ops->event_memset(&__ctx, 0, 1); \
-+ } \
-+ goto __end_field_##dest;
-+
-+#undef tp_strcpy
-+#define tp_strcpy(dest, src) \
-+ tp_memcpy(dest, src, __get_dynamic_array_len(dest))
-+
-+/* Named field types must be defined in lttng-types.h */
-+
-+#undef __get_str
-+#define __get_str(field) field
-+
-+#undef __get_dynamic_array
-+#define __get_dynamic_array(field) field
-+
-+/* Beware: this get len actually consumes the len value */
-+#undef __get_dynamic_array_len
-+#define __get_dynamic_array_len(field) __dynamic_len[__dynamic_len_idx++]
-+
-+#undef TP_PROTO
-+#define TP_PROTO(args...) args
-+
-+#undef TP_ARGS
-+#define TP_ARGS(args...) args
-+
-+#undef TP_STRUCT__entry
-+#define TP_STRUCT__entry(args...) args
-+
-+#undef TP_fast_assign
-+#define TP_fast_assign(args...) args
-+
-+#undef DECLARE_EVENT_CLASS
-+#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
-+static void __event_probe__##_name(void *__data, _proto) \
-+{ \
-+ struct ltt_event *__event = __data; \
-+ struct ltt_channel *__chan = __event->chan; \
-+ struct lib_ring_buffer_ctx __ctx; \
-+ size_t __event_len, __event_align; \
-+ size_t __dynamic_len_idx = 0; \
-+ size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
-+ struct __event_typemap__##_name __typemap; \
-+ int __ret; \
-+ \
-+ if (0) \
-+ (void) __dynamic_len_idx; /* don't warn if unused */ \
-+ if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
-+ return; \
-+ if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
-+ return; \
-+ if (unlikely(!ACCESS_ONCE(__event->enabled))) \
-+ return; \
-+ __event_len = __event_get_size__##_name(__dynamic_len, _args); \
-+ __event_align = __event_get_align__##_name(_args); \
-+ lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
-+ __event_align, -1); \
-+ __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
-+ if (__ret < 0) \
-+ return; \
-+ /* Control code (field ordering) */ \
-+ _tstruct \
-+ __chan->ops->event_commit(&__ctx); \
-+ return; \
-+ /* Copy code, steered by control code */ \
-+ _assign \
-+}
-+
-+#undef DECLARE_EVENT_CLASS_NOARGS
-+#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
-+static void __event_probe__##_name(void *__data) \
-+{ \
-+ struct ltt_event *__event = __data; \
-+ struct ltt_channel *__chan = __event->chan; \
-+ struct lib_ring_buffer_ctx __ctx; \
-+ size_t __event_len, __event_align; \
-+ int __ret; \
-+ \
-+ if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
-+ return; \
-+ if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
-+ return; \
-+ if (unlikely(!ACCESS_ONCE(__event->enabled))) \
-+ return; \
-+ __event_len = 0; \
-+ __event_align = 1; \
-+ lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
-+ __event_align, -1); \
-+ __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
-+ if (__ret < 0) \
-+ return; \
-+ /* Control code (field ordering) */ \
-+ _tstruct \
-+ __chan->ops->event_commit(&__ctx); \
-+ return; \
-+ /* Copy code, steered by control code */ \
-+ _assign \
-+}
-+
-+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-+
-+/*
-+ * Stage 10 of the trace events.
-+ *
-+ * Register/unregister probes at module load/unload.
-+ */
-+
-+#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
-+
-+#define TP_ID1(_token, _system) _token##_system
-+#define TP_ID(_token, _system) TP_ID1(_token, _system)
-+#define module_init_eval1(_token, _system) module_init(_token##_system)
-+#define module_init_eval(_token, _system) module_init_eval1(_token, _system)
-+#define module_exit_eval1(_token, _system) module_exit(_token##_system)
-+#define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
-+
-+#ifndef TP_MODULE_OVERRIDE
-+static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
-+{
-+ wrapper_vmalloc_sync_all();
-+ return ltt_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
-+}
-+
-+module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
-+
-+static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
-+{
-+ ltt_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
-+}
-+
-+module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
-+#endif
-+
-+#undef module_init_eval
-+#undef module_exit_eval
-+#undef TP_ID1
-+#undef TP_ID
-+
-+#undef TP_PROTO
-+#undef TP_ARGS
-+#undef TRACE_EVENT_FLAGS
-diff --git a/drivers/staging/lttng/probes/lttng-ftrace.c b/drivers/staging/lttng/probes/lttng-ftrace.c
-new file mode 100644
-index 0000000..1aa7183
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-ftrace.c
-@@ -0,0 +1,188 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng function tracer integration module.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+/*
-+ * The Ftrace function tracer does not seem to provide synchronization between
-+ * probe teardown and callback execution. Therefore, we make this module stay
-+ * permanently loaded (it cannot be unloaded).
-+ *
-+ * TODO: Move to register_ftrace_function() (which is exported for
-+ * modules) for Linux >= 3.0. It is faster (only enables the selected
-+ * functions), and will stay there.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/ftrace.h>
-+#include <linux/slab.h>
-+#include "../ltt-events.h"
-+#include "../wrapper/ringbuffer/frontend_types.h"
-+#include "../wrapper/ftrace.h"
-+#include "../wrapper/vmalloc.h"
-+#include "../ltt-tracer.h"
-+
-+static
-+void lttng_ftrace_handler(unsigned long ip, unsigned long parent_ip, void **data)
-+{
-+ struct ltt_event *event = *data;
-+ struct ltt_channel *chan = event->chan;
-+ struct lib_ring_buffer_ctx ctx;
-+ struct {
-+ unsigned long ip;
-+ unsigned long parent_ip;
-+ } payload;
-+ int ret;
-+
-+ if (unlikely(!ACCESS_ONCE(chan->session->active)))
-+ return;
-+ if (unlikely(!ACCESS_ONCE(chan->enabled)))
-+ return;
-+ if (unlikely(!ACCESS_ONCE(event->enabled)))
-+ return;
-+
-+ lib_ring_buffer_ctx_init(&ctx, chan->chan, event,
-+ sizeof(payload), ltt_alignof(payload), -1);
-+ ret = chan->ops->event_reserve(&ctx, event->id);
-+ if (ret < 0)
-+ return;
-+ payload.ip = ip;
-+ payload.parent_ip = parent_ip;
-+ lib_ring_buffer_align_ctx(&ctx, ltt_alignof(payload));
-+ chan->ops->event_write(&ctx, &payload, sizeof(payload));
-+ chan->ops->event_commit(&ctx);
-+ return;
-+}
-+
-+/*
-+ * Create event description
-+ */
-+static
-+int lttng_create_ftrace_event(const char *name, struct ltt_event *event)
-+{
-+ struct lttng_event_field *fields;
-+ struct lttng_event_desc *desc;
-+ int ret;
-+
-+ desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
-+ if (!desc)
-+ return -ENOMEM;
-+ desc->name = kstrdup(name, GFP_KERNEL);
-+ if (!desc->name) {
-+ ret = -ENOMEM;
-+ goto error_str;
-+ }
-+ desc->nr_fields = 2;
-+ desc->fields = fields =
-+ kzalloc(2 * sizeof(struct lttng_event_field), GFP_KERNEL);
-+ if (!desc->fields) {
-+ ret = -ENOMEM;
-+ goto error_fields;
-+ }
-+ fields[0].name = "ip";
-+ fields[0].type.atype = atype_integer;
-+ fields[0].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-+ fields[0].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-+ fields[0].type.u.basic.integer.signedness = is_signed_type(unsigned long);
-+ fields[0].type.u.basic.integer.reverse_byte_order = 0;
-+ fields[0].type.u.basic.integer.base = 16;
-+ fields[0].type.u.basic.integer.encoding = lttng_encode_none;
-+
-+ fields[1].name = "parent_ip";
-+ fields[1].type.atype = atype_integer;
-+ fields[1].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-+ fields[1].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-+ fields[1].type.u.basic.integer.signedness = is_signed_type(unsigned long);
-+ fields[1].type.u.basic.integer.reverse_byte_order = 0;
-+ fields[1].type.u.basic.integer.base = 16;
-+ fields[1].type.u.basic.integer.encoding = lttng_encode_none;
-+
-+ desc->owner = THIS_MODULE;
-+ event->desc = desc;
-+
-+ return 0;
-+
-+error_fields:
-+ kfree(desc->name);
-+error_str:
-+ kfree(desc);
-+ return ret;
-+}
-+
-+static
-+struct ftrace_probe_ops lttng_ftrace_ops = {
-+ .func = lttng_ftrace_handler,
-+};
-+
-+int lttng_ftrace_register(const char *name,
-+ const char *symbol_name,
-+ struct ltt_event *event)
-+{
-+ int ret;
-+
-+ ret = lttng_create_ftrace_event(name, event);
-+ if (ret)
-+ goto error;
-+
-+ event->u.ftrace.symbol_name = kstrdup(symbol_name, GFP_KERNEL);
-+ if (!event->u.ftrace.symbol_name)
-+ goto name_error;
-+
-+ /* Ensure the memory we just allocated doesn't trigger page faults */
-+ wrapper_vmalloc_sync_all();
-+
-+ ret = wrapper_register_ftrace_function_probe(event->u.ftrace.symbol_name,
-+ &lttng_ftrace_ops, event);
-+ if (ret < 0)
-+ goto register_error;
-+ return 0;
-+
-+register_error:
-+ kfree(event->u.ftrace.symbol_name);
-+name_error:
-+ kfree(event->desc->fields);
-+ kfree(event->desc->name);
-+ kfree(event->desc);
-+error:
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(lttng_ftrace_register);
-+
-+void lttng_ftrace_unregister(struct ltt_event *event)
-+{
-+ wrapper_unregister_ftrace_function_probe(event->u.ftrace.symbol_name,
-+ &lttng_ftrace_ops, event);
-+}
-+EXPORT_SYMBOL_GPL(lttng_ftrace_unregister);
-+
-+void lttng_ftrace_destroy_private(struct ltt_event *event)
-+{
-+ kfree(event->u.ftrace.symbol_name);
-+ kfree(event->desc->fields);
-+ kfree(event->desc->name);
-+ kfree(event->desc);
-+}
-+EXPORT_SYMBOL_GPL(lttng_ftrace_destroy_private);
-+
-+int lttng_ftrace_init(void)
-+{
-+ wrapper_vmalloc_sync_all();
-+ return 0;
-+}
-+module_init(lttng_ftrace_init)
-+
-+/*
-+ * Ftrace takes care of waiting for a grace period (RCU sched) at probe
-+ * unregistration, and disables preemption around probe call.
-+ */
-+void lttng_ftrace_exit(void)
-+{
-+}
-+module_exit(lttng_ftrace_exit)
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit Ftrace Support");
-diff --git a/drivers/staging/lttng/probes/lttng-kprobes.c b/drivers/staging/lttng/probes/lttng-kprobes.c
-new file mode 100644
-index 0000000..784002a
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-kprobes.c
-@@ -0,0 +1,164 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng kprobes integration module.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/kprobes.h>
-+#include <linux/slab.h>
-+#include "../ltt-events.h"
-+#include "../wrapper/ringbuffer/frontend_types.h"
-+#include "../wrapper/vmalloc.h"
-+#include "../ltt-tracer.h"
-+
-+static
-+int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
-+{
-+ struct ltt_event *event =
-+ container_of(p, struct ltt_event, u.kprobe.kp);
-+ struct ltt_channel *chan = event->chan;
-+ struct lib_ring_buffer_ctx ctx;
-+ int ret;
-+ unsigned long data = (unsigned long) p->addr;
-+
-+ if (unlikely(!ACCESS_ONCE(chan->session->active)))
-+ return 0;
-+ if (unlikely(!ACCESS_ONCE(chan->enabled)))
-+ return 0;
-+ if (unlikely(!ACCESS_ONCE(event->enabled)))
-+ return 0;
-+
-+ lib_ring_buffer_ctx_init(&ctx, chan->chan, event, sizeof(data),
-+ ltt_alignof(data), -1);
-+ ret = chan->ops->event_reserve(&ctx, event->id);
-+ if (ret < 0)
-+ return 0;
-+ lib_ring_buffer_align_ctx(&ctx, ltt_alignof(data));
-+ chan->ops->event_write(&ctx, &data, sizeof(data));
-+ chan->ops->event_commit(&ctx);
-+ return 0;
-+}
-+
-+/*
-+ * Create event description
-+ */
-+static
-+int lttng_create_kprobe_event(const char *name, struct ltt_event *event)
-+{
-+ struct lttng_event_field *field;
-+ struct lttng_event_desc *desc;
-+ int ret;
-+
-+ desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
-+ if (!desc)
-+ return -ENOMEM;
-+ desc->name = kstrdup(name, GFP_KERNEL);
-+ if (!desc->name) {
-+ ret = -ENOMEM;
-+ goto error_str;
-+ }
-+ desc->nr_fields = 1;
-+ desc->fields = field =
-+ kzalloc(1 * sizeof(struct lttng_event_field), GFP_KERNEL);
-+ if (!field) {
-+ ret = -ENOMEM;
-+ goto error_field;
-+ }
-+ field->name = "ip";
-+ field->type.atype = atype_integer;
-+ field->type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-+ field->type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-+ field->type.u.basic.integer.signedness = is_signed_type(unsigned long);
-+ field->type.u.basic.integer.reverse_byte_order = 0;
-+ field->type.u.basic.integer.base = 16;
-+ field->type.u.basic.integer.encoding = lttng_encode_none;
-+ desc->owner = THIS_MODULE;
-+ event->desc = desc;
-+
-+ return 0;
-+
-+error_field:
-+ kfree(desc->name);
-+error_str:
-+ kfree(desc);
-+ return ret;
-+}
-+
-+int lttng_kprobes_register(const char *name,
-+ const char *symbol_name,
-+ uint64_t offset,
-+ uint64_t addr,
-+ struct ltt_event *event)
-+{
-+ int ret;
-+
-+ /* Kprobes expects a NULL symbol name if unused */
-+ if (symbol_name[0] == '\0')
-+ symbol_name = NULL;
-+
-+ ret = lttng_create_kprobe_event(name, event);
-+ if (ret)
-+ goto error;
-+ memset(&event->u.kprobe.kp, 0, sizeof(event->u.kprobe.kp));
-+ event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
-+ if (symbol_name) {
-+ event->u.kprobe.symbol_name =
-+ kzalloc(LTTNG_SYM_NAME_LEN * sizeof(char),
-+ GFP_KERNEL);
-+ if (!event->u.kprobe.symbol_name) {
-+ ret = -ENOMEM;
-+ goto name_error;
-+ }
-+ memcpy(event->u.kprobe.symbol_name, symbol_name,
-+ LTTNG_SYM_NAME_LEN * sizeof(char));
-+ event->u.kprobe.kp.symbol_name =
-+ event->u.kprobe.symbol_name;
-+ }
-+ event->u.kprobe.kp.offset = offset;
-+ event->u.kprobe.kp.addr = (void *) (unsigned long) addr;
-+
-+ /*
-+ * Ensure the memory we just allocated doesn't trigger page faults.
-+ * Well.. kprobes itself puts the page fault handler on the blacklist,
-+ * but we can never be too careful.
-+ */
-+ wrapper_vmalloc_sync_all();
-+
-+ ret = register_kprobe(&event->u.kprobe.kp);
-+ if (ret)
-+ goto register_error;
-+ return 0;
-+
-+register_error:
-+ kfree(event->u.kprobe.symbol_name);
-+name_error:
-+ kfree(event->desc->fields);
-+ kfree(event->desc->name);
-+ kfree(event->desc);
-+error:
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(lttng_kprobes_register);
-+
-+void lttng_kprobes_unregister(struct ltt_event *event)
-+{
-+ unregister_kprobe(&event->u.kprobe.kp);
-+}
-+EXPORT_SYMBOL_GPL(lttng_kprobes_unregister);
-+
-+void lttng_kprobes_destroy_private(struct ltt_event *event)
-+{
-+ kfree(event->u.kprobe.symbol_name);
-+ kfree(event->desc->fields);
-+ kfree(event->desc->name);
-+ kfree(event->desc);
-+}
-+EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_private);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit Kprobes Support");
-diff --git a/drivers/staging/lttng/probes/lttng-kretprobes.c b/drivers/staging/lttng/probes/lttng-kretprobes.c
-new file mode 100644
-index 0000000..6b29101
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-kretprobes.c
-@@ -0,0 +1,277 @@
-+/*
-+ * (C) Copyright 2009-2011 -
-+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng kretprobes integration module.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/kprobes.h>
-+#include <linux/slab.h>
-+#include <linux/kref.h>
-+#include "../ltt-events.h"
-+#include "../wrapper/ringbuffer/frontend_types.h"
-+#include "../wrapper/vmalloc.h"
-+#include "../ltt-tracer.h"
-+
-+enum lttng_kretprobe_type {
-+ EVENT_ENTRY = 0,
-+ EVENT_RETURN = 1,
-+};
-+
-+struct lttng_krp {
-+ struct kretprobe krp;
-+ struct ltt_event *event[2]; /* ENTRY and RETURN */
-+ struct kref kref_register;
-+ struct kref kref_alloc;
-+};
-+
-+static
-+int _lttng_kretprobes_handler(struct kretprobe_instance *krpi,
-+ struct pt_regs *regs,
-+ enum lttng_kretprobe_type type)
-+{
-+ struct lttng_krp *lttng_krp =
-+ container_of(krpi->rp, struct lttng_krp, krp);
-+ struct ltt_event *event =
-+ lttng_krp->event[type];
-+ struct ltt_channel *chan = event->chan;
-+ struct lib_ring_buffer_ctx ctx;
-+ int ret;
-+ struct {
-+ unsigned long ip;
-+ unsigned long parent_ip;
-+ } payload;
-+
-+ if (unlikely(!ACCESS_ONCE(chan->session->active)))
-+ return 0;
-+ if (unlikely(!ACCESS_ONCE(chan->enabled)))
-+ return 0;
-+ if (unlikely(!ACCESS_ONCE(event->enabled)))
-+ return 0;
-+
-+ payload.ip = (unsigned long) krpi->rp->kp.addr;
-+ payload.parent_ip = (unsigned long) krpi->ret_addr;
-+
-+ lib_ring_buffer_ctx_init(&ctx, chan->chan, event, sizeof(payload),
-+ ltt_alignof(payload), -1);
-+ ret = chan->ops->event_reserve(&ctx, event->id);
-+ if (ret < 0)
-+ return 0;
-+ lib_ring_buffer_align_ctx(&ctx, ltt_alignof(payload));
-+ chan->ops->event_write(&ctx, &payload, sizeof(payload));
-+ chan->ops->event_commit(&ctx);
-+ return 0;
-+}
-+
-+static
-+int lttng_kretprobes_handler_entry(struct kretprobe_instance *krpi,
-+ struct pt_regs *regs)
-+{
-+ return _lttng_kretprobes_handler(krpi, regs, EVENT_ENTRY);
-+}
-+
-+static
-+int lttng_kretprobes_handler_return(struct kretprobe_instance *krpi,
-+ struct pt_regs *regs)
-+{
-+ return _lttng_kretprobes_handler(krpi, regs, EVENT_RETURN);
-+}
-+
-+/*
-+ * Create event description
-+ */
-+static
-+int lttng_create_kprobe_event(const char *name, struct ltt_event *event,
-+ enum lttng_kretprobe_type type)
-+{
-+ struct lttng_event_field *fields;
-+ struct lttng_event_desc *desc;
-+ int ret;
-+ char *alloc_name;
-+ size_t name_len;
-+ const char *suffix = NULL;
-+
-+ desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
-+ if (!desc)
-+ return -ENOMEM;
-+ name_len = strlen(name);
-+ switch (type) {
-+ case EVENT_ENTRY:
-+ suffix = "_entry";
-+ break;
-+ case EVENT_RETURN:
-+ suffix = "_return";
-+ break;
-+ }
-+ name_len += strlen(suffix);
-+ alloc_name = kmalloc(name_len + 1, GFP_KERNEL);
-+ if (!alloc_name) {
-+ ret = -ENOMEM;
-+ goto error_str;
-+ }
-+ strcpy(alloc_name, name);
-+ strcat(alloc_name, suffix);
-+ desc->name = alloc_name;
-+ desc->nr_fields = 2;
-+ desc->fields = fields =
-+ kzalloc(2 * sizeof(struct lttng_event_field), GFP_KERNEL);
-+ if (!desc->fields) {
-+ ret = -ENOMEM;
-+ goto error_fields;
-+ }
-+ fields[0].name = "ip";
-+ fields[0].type.atype = atype_integer;
-+ fields[0].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-+ fields[0].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-+ fields[0].type.u.basic.integer.signedness = is_signed_type(unsigned long);
-+ fields[0].type.u.basic.integer.reverse_byte_order = 0;
-+ fields[0].type.u.basic.integer.base = 16;
-+ fields[0].type.u.basic.integer.encoding = lttng_encode_none;
-+
-+ fields[1].name = "parent_ip";
-+ fields[1].type.atype = atype_integer;
-+ fields[1].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-+ fields[1].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-+ fields[1].type.u.basic.integer.signedness = is_signed_type(unsigned long);
-+ fields[1].type.u.basic.integer.reverse_byte_order = 0;
-+ fields[1].type.u.basic.integer.base = 16;
-+ fields[1].type.u.basic.integer.encoding = lttng_encode_none;
-+
-+ desc->owner = THIS_MODULE;
-+ event->desc = desc;
-+
-+ return 0;
-+
-+error_fields:
-+ kfree(desc->name);
-+error_str:
-+ kfree(desc);
-+ return ret;
-+}
-+
-+int lttng_kretprobes_register(const char *name,
-+ const char *symbol_name,
-+ uint64_t offset,
-+ uint64_t addr,
-+ struct ltt_event *event_entry,
-+ struct ltt_event *event_return)
-+{
-+ int ret;
-+ struct lttng_krp *lttng_krp;
-+
-+ /* Kprobes expects a NULL symbol name if unused */
-+ if (symbol_name[0] == '\0')
-+ symbol_name = NULL;
-+
-+ ret = lttng_create_kprobe_event(name, event_entry, EVENT_ENTRY);
-+ if (ret)
-+ goto error;
-+ ret = lttng_create_kprobe_event(name, event_return, EVENT_RETURN);
-+ if (ret)
-+ goto event_return_error;
-+ lttng_krp = kzalloc(sizeof(*lttng_krp), GFP_KERNEL);
-+ if (!lttng_krp)
-+ goto krp_error;
-+ lttng_krp->krp.entry_handler = lttng_kretprobes_handler_entry;
-+ lttng_krp->krp.handler = lttng_kretprobes_handler_return;
-+ if (symbol_name) {
-+ char *alloc_symbol;
-+
-+ alloc_symbol = kstrdup(symbol_name, GFP_KERNEL);
-+ if (!alloc_symbol) {
-+ ret = -ENOMEM;
-+ goto name_error;
-+ }
-+ lttng_krp->krp.kp.symbol_name =
-+ alloc_symbol;
-+ event_entry->u.kretprobe.symbol_name =
-+ alloc_symbol;
-+ event_return->u.kretprobe.symbol_name =
-+ alloc_symbol;
-+ }
-+ lttng_krp->krp.kp.offset = offset;
-+ lttng_krp->krp.kp.addr = (void *) (unsigned long) addr;
-+
-+ /* Allow probe handler to find event structures */
-+ lttng_krp->event[EVENT_ENTRY] = event_entry;
-+ lttng_krp->event[EVENT_RETURN] = event_return;
-+ event_entry->u.kretprobe.lttng_krp = lttng_krp;
-+ event_return->u.kretprobe.lttng_krp = lttng_krp;
-+
-+ /*
-+ * Both events must be unregistered before the kretprobe is
-+ * unregistered. Same for memory allocation.
-+ */
-+ kref_init(&lttng_krp->kref_alloc);
-+ kref_get(&lttng_krp->kref_alloc); /* inc refcount to 2 */
-+ kref_init(&lttng_krp->kref_register);
-+ kref_get(&lttng_krp->kref_register); /* inc refcount to 2 */
-+
-+ /*
-+ * Ensure the memory we just allocated don't trigger page faults.
-+ * Well.. kprobes itself puts the page fault handler on the blacklist,
-+ * but we can never be too careful.
-+ */
-+ wrapper_vmalloc_sync_all();
-+
-+ ret = register_kretprobe(&lttng_krp->krp);
-+ if (ret)
-+ goto register_error;
-+ return 0;
-+
-+register_error:
-+ kfree(lttng_krp->krp.kp.symbol_name);
-+name_error:
-+ kfree(lttng_krp);
-+krp_error:
-+ kfree(event_return->desc->fields);
-+ kfree(event_return->desc->name);
-+ kfree(event_return->desc);
-+event_return_error:
-+ kfree(event_entry->desc->fields);
-+ kfree(event_entry->desc->name);
-+ kfree(event_entry->desc);
-+error:
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(lttng_kretprobes_register);
-+
-+static
-+void _lttng_kretprobes_unregister_release(struct kref *kref)
-+{
-+ struct lttng_krp *lttng_krp =
-+ container_of(kref, struct lttng_krp, kref_register);
-+ unregister_kretprobe(&lttng_krp->krp);
-+}
-+
-+void lttng_kretprobes_unregister(struct ltt_event *event)
-+{
-+ kref_put(&event->u.kretprobe.lttng_krp->kref_register,
-+ _lttng_kretprobes_unregister_release);
-+}
-+EXPORT_SYMBOL_GPL(lttng_kretprobes_unregister);
-+
-+static
-+void _lttng_kretprobes_release(struct kref *kref)
-+{
-+ struct lttng_krp *lttng_krp =
-+ container_of(kref, struct lttng_krp, kref_alloc);
-+ kfree(lttng_krp->krp.kp.symbol_name);
-+}
-+
-+void lttng_kretprobes_destroy_private(struct ltt_event *event)
-+{
-+ kfree(event->desc->fields);
-+ kfree(event->desc->name);
-+ kfree(event->desc);
-+ kref_put(&event->u.kretprobe.lttng_krp->kref_alloc,
-+ _lttng_kretprobes_release);
-+}
-+EXPORT_SYMBOL_GPL(lttng_kretprobes_destroy_private);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("Linux Trace Toolkit Kretprobes Support");
-diff --git a/drivers/staging/lttng/probes/lttng-probe-block.c b/drivers/staging/lttng/probes/lttng-probe-block.c
-new file mode 100644
-index 0000000..9eeebfc
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-probe-block.c
-@@ -0,0 +1,31 @@
-+/*
-+ * probes/lttng-probe-block.c
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng block probes.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/blktrace_api.h>
-+
-+/*
-+ * Create the tracepoint static inlines from the kernel to validate that our
-+ * trace event macros match the kernel we run on.
-+ */
-+#include <trace/events/block.h>
-+
-+/*
-+ * Create LTTng tracepoint probes.
-+ */
-+#define LTTNG_PACKAGE_BUILD
-+#define CREATE_TRACE_POINTS
-+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
-+
-+#include "../instrumentation/events/lttng-module/block.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng block probes");
-diff --git a/drivers/staging/lttng/probes/lttng-probe-irq.c b/drivers/staging/lttng/probes/lttng-probe-irq.c
-new file mode 100644
-index 0000000..4a6a322
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-probe-irq.c
-@@ -0,0 +1,31 @@
-+/*
-+ * probes/lttng-probe-irq.c
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng irq probes.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+
-+/*
-+ * Create the tracepoint static inlines from the kernel to validate that our
-+ * trace event macros match the kernel we run on.
-+ */
-+#include <trace/events/irq.h>
-+
-+/*
-+ * Create LTTng tracepoint probes.
-+ */
-+#define LTTNG_PACKAGE_BUILD
-+#define CREATE_TRACE_POINTS
-+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
-+
-+#include "../instrumentation/events/lttng-module/irq.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng irq probes");
-diff --git a/drivers/staging/lttng/probes/lttng-probe-kvm.c b/drivers/staging/lttng/probes/lttng-probe-kvm.c
-new file mode 100644
-index 0000000..9efc6dd
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-probe-kvm.c
-@@ -0,0 +1,31 @@
-+/*
-+ * probes/lttng-probe-kvm.c
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng kvm probes.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/kvm_host.h>
-+
-+/*
-+ * Create the tracepoint static inlines from the kernel to validate that our
-+ * trace event macros match the kernel we run on.
-+ */
-+#include <trace/events/kvm.h>
-+
-+/*
-+ * Create LTTng tracepoint probes.
-+ */
-+#define LTTNG_PACKAGE_BUILD
-+#define CREATE_TRACE_POINTS
-+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
-+
-+#include "../instrumentation/events/lttng-module/kvm.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng kvm probes");
-diff --git a/drivers/staging/lttng/probes/lttng-probe-lttng.c b/drivers/staging/lttng/probes/lttng-probe-lttng.c
-new file mode 100644
-index 0000000..62aab6c
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-probe-lttng.c
-@@ -0,0 +1,24 @@
-+/*
-+ * probes/lttng-probe-core.c
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng core probes.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+
-+/*
-+ * Create LTTng tracepoint probes.
-+ */
-+#define LTTNG_PACKAGE_BUILD
-+#define CREATE_TRACE_POINTS
-+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
-+
-+#include "../instrumentation/events/lttng-module/lttng.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng core probes");
-diff --git a/drivers/staging/lttng/probes/lttng-probe-sched.c b/drivers/staging/lttng/probes/lttng-probe-sched.c
-new file mode 100644
-index 0000000..18c1521
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-probe-sched.c
-@@ -0,0 +1,30 @@
-+/*
-+ * probes/lttng-probe-sched.c
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng sched probes.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+
-+/*
-+ * Create the tracepoint static inlines from the kernel to validate that our
-+ * trace event macros match the kernel we run on.
-+ */
-+#include <trace/events/sched.h>
-+
-+/*
-+ * Create LTTng tracepoint probes.
-+ */
-+#define LTTNG_PACKAGE_BUILD
-+#define CREATE_TRACE_POINTS
-+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
-+
-+#include "../instrumentation/events/lttng-module/sched.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng sched probes");
-diff --git a/drivers/staging/lttng/probes/lttng-type-list.h b/drivers/staging/lttng/probes/lttng-type-list.h
-new file mode 100644
-index 0000000..7b953db
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-type-list.h
-@@ -0,0 +1,21 @@
-+/*
-+ * lttng-type-list.h
-+ *
-+ * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+/* Type list, used to create metadata */
-+
-+/* Enumerations */
-+TRACE_EVENT_ENUM(hrtimer_mode,
-+ V(HRTIMER_MODE_ABS),
-+ V(HRTIMER_MODE_REL),
-+ V(HRTIMER_MODE_PINNED),
-+ V(HRTIMER_MODE_ABS_PINNED),
-+ V(HRTIMER_MODE_REL_PINNED),
-+ R(HRTIMER_MODE_UNDEFINED, 0x04, 0x20), /* Example (to remove) */
-+)
-+
-+TRACE_EVENT_TYPE(hrtimer_mode, enum, unsigned char)
-diff --git a/drivers/staging/lttng/probes/lttng-types.c b/drivers/staging/lttng/probes/lttng-types.c
-new file mode 100644
-index 0000000..93a9ae5
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-types.c
-@@ -0,0 +1,49 @@
-+/*
-+ * probes/lttng-types.c
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng types.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/types.h>
-+#include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "../ltt-events.h"
-+#include "lttng-types.h"
-+#include <linux/hrtimer.h>
-+
-+#define STAGE_EXPORT_ENUMS
-+#include "lttng-types.h"
-+#include "lttng-type-list.h"
-+#undef STAGE_EXPORT_ENUMS
-+
-+struct lttng_enum lttng_enums[] = {
-+#define STAGE_EXPORT_TYPES
-+#include "lttng-types.h"
-+#include "lttng-type-list.h"
-+#undef STAGE_EXPORT_TYPES
-+};
-+
-+static int lttng_types_init(void)
-+{
-+ int ret = 0;
-+
-+ wrapper_vmalloc_sync_all();
-+ /* TODO */
-+ return ret;
-+}
-+
-+module_init(lttng_types_init);
-+
-+static void lttng_types_exit(void)
-+{
-+}
-+
-+module_exit(lttng_types_exit);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng types");
-diff --git a/drivers/staging/lttng/probes/lttng-types.h b/drivers/staging/lttng/probes/lttng-types.h
-new file mode 100644
-index 0000000..1062028
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-types.h
-@@ -0,0 +1,72 @@
-+/*
-+ * Protect against multiple inclusion of structure declarations, but run the
-+ * stages below each time.
-+ */
-+#ifndef _LTTNG_PROBES_LTTNG_TYPES_H
-+#define _LTTNG_PROBES_LTTNG_TYPES_H
-+
-+/*
-+ * probes/lttng-types.h
-+ *
-+ * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * LTTng types.
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#include <linux/seq_file.h>
-+#include "lttng.h"
-+#include "../ltt-events.h"
-+#include "../ltt-tracer.h"
-+#include "../ltt-endian.h"
-+
-+#endif /* _LTTNG_PROBES_LTTNG_TYPES_H */
-+
-+/* Export enumerations */
-+
-+#ifdef STAGE_EXPORT_ENUMS
-+
-+#undef TRACE_EVENT_TYPE
-+#define TRACE_EVENT_TYPE(_name, _abstract_type, args...)
-+
-+#undef TRACE_EVENT_ENUM
-+#define TRACE_EVENT_ENUM(_name, _entries...) \
-+ const struct lttng_enum_entry __trace_event_enum_##_name[] = { \
-+ PARAMS(_entries) \
-+ };
-+
-+/* Enumeration entry (single value) */
-+#undef V
-+#define V(_string) { _string, _string, #_string}
-+
-+/* Enumeration entry (range) */
-+#undef R
-+#define R(_string, _range_start, _range_end) \
-+ { _range_start, _range_end, #_string }
-+
-+#endif /* STAGE_EXPORT_ENUMS */
-+
-+
-+/* Export named types */
-+
-+#ifdef STAGE_EXPORT_TYPES
-+
-+#undef TRACE_EVENT_TYPE___enum
-+#define TRACE_EVENT_TYPE___enum(_name, _container_type) \
-+ { \
-+ .name = #_name, \
-+ .container_type = __type_integer(_container_type, __BYTE_ORDER, 10, none), \
-+ .entries = __trace_event_enum_##_name, \
-+ .len = ARRAY_SIZE(__trace_event_enum_##_name), \
-+ },
-+
-+/* Local declaration */
-+#undef TRACE_EVENT_TYPE
-+#define TRACE_EVENT_TYPE(_name, _abstract_type, args...) \
-+ TRACE_EVENT_TYPE___##_abstract_type(_name, args)
-+
-+#undef TRACE_EVENT_ENUM
-+#define TRACE_EVENT_ENUM(_name, _entries...)
-+
-+#endif /* STAGE_EXPORT_TYPES */
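The lttng-types.h header just above guards only its structure declarations against double inclusion; the STAGE_EXPORT_ENUMS and STAGE_EXPORT_TYPES sections are deliberately re-expanded on every include, so the single list in lttng-type-list.h is expanded once into per-enum entry arrays and once into the lttng_enums[] table that points at them. Below is a compressed sketch of that idea; it folds the multi-header staging into one list macro in a single file, and every name in it is illustrative rather than LTTng's.

#include <stdio.h>

/* The "type list": written once, expanded under each stage below. */
#define TYPE_LIST \
        ENTRY(hrtimer_mode_abs) \
        ENTRY(hrtimer_mode_rel)

/* Stage 1: emit one entry array per listed name. */
#undef ENTRY
#define ENTRY(_name) static const char *_name##_entries[] = { #_name };
TYPE_LIST

/* Stage 2: emit a table whose rows reference the stage-1 arrays. */
struct named_enum {
        const char *name;
        const char *const *entries;
};

#undef ENTRY
#define ENTRY(_name) { #_name, _name##_entries },
static const struct named_enum enums[] = { TYPE_LIST };

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(enums) / sizeof(enums[0]); i++)
                printf("%s -> %s\n", enums[i].name, enums[i].entries[0]);
        return 0;
}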
-diff --git a/drivers/staging/lttng/probes/lttng.h b/drivers/staging/lttng/probes/lttng.h
-new file mode 100644
-index 0000000..e16fc2d
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng.h
-@@ -0,0 +1,15 @@
-+#ifndef _LTTNG_PROBES_LTTNG_H
-+#define _LTTNG_PROBES_LTTNG_H
-+
-+/*
-+ * lttng.h
-+ *
-+ * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * Dual LGPL v2.1/GPL v2 license.
-+ */
-+
-+#undef PARAMS
-+#define PARAMS(args...) args
-+
-+#endif /* _LTTNG_PROBES_LTTNG_H */
---
-1.7.9
-
diff --git a/patches.lttng/0017-lttng-toplevel-Makefile-and-Kconfig.patch b/patches.lttng/0017-lttng-toplevel-Makefile-and-Kconfig.patch
deleted file mode 100644
index aad46b07ea9..00000000000
--- a/patches.lttng/0017-lttng-toplevel-Makefile-and-Kconfig.patch
+++ /dev/null
@@ -1,97 +0,0 @@
-From 3c46c44027a86fc8a008a6096b0e5b8f5a4afcb5 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:25 -0500
-Subject: lttng: toplevel Makefile and Kconfig
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/Kconfig | 35 +++++++++++++++++++++++++++++++++++
- drivers/staging/lttng/Makefile | 33 +++++++++++++++++++++++++++++++++
- 2 files changed, 68 insertions(+), 0 deletions(-)
- create mode 100644 drivers/staging/lttng/Kconfig
- create mode 100644 drivers/staging/lttng/Makefile
-
-diff --git a/drivers/staging/lttng/Kconfig b/drivers/staging/lttng/Kconfig
-new file mode 100644
-index 0000000..34c4a4f
---- /dev/null
-+++ b/drivers/staging/lttng/Kconfig
-@@ -0,0 +1,35 @@
-+config LTTNG
-+ tristate "LTTng kernel tracer"
-+ depends on TRACEPOINTS
-+ help
-+ The LTTng 2.0 Tracer Toolchain allows integrated kernel and
-+ user-space tracing from a single user interface: the "lttng"
-+ command. See http://lttng.org website for the "lttng-tools"
-+ user-space tracer control tools package and the "babeltrace"
-+ package for conversion of trace data to a human-readable
-+ format.
-+
-+ LTTng features:
-+ - System-wide tracing across kernel, libraries and
-+ applications,
-+ - Tracepoints, detailed syscall tracing (fast strace replacement),
-+ Function tracer, CPU Performance Monitoring Unit (PMU) counters
-+ and kprobes support,
-+ - Have the ability to attach "context" information to events in the
-+ trace (e.g. any PMU counter, pid, ppid, tid, comm name, etc). All
-+ the extra information fields to be collected with events are
-+ optional, specified on a per-tracing-session basis (except for
-+ timestamp and event id, which are mandatory).
-+ - Precise and fast clock sources with near cycle-level
-+ timestamps,
-+ - Efficient trace data transport:
-+ - Compact Binary format with CTF,
-+ - Per-core buffers ensures scalability,
-+ - Fast-paths in caller context, amortized synchronization,
-+ - Zero-copy using splice and mmap system calls, over disk,
-+ network or consumed in-place,
-+ - Multiple concurrent tracing sessions are supported,
-+ - Designed to meet hard real-time constraints,
-+ - Supports live streaming of the trace data,
-+ - Produces CTF (Common Trace Format) natively (see
-+ http://www.efficios.com/ctf).
-diff --git a/drivers/staging/lttng/Makefile b/drivers/staging/lttng/Makefile
-new file mode 100644
-index 0000000..9ad4eb0
---- /dev/null
-+++ b/drivers/staging/lttng/Makefile
-@@ -0,0 +1,33 @@
-+#
-+# Makefile for the LTTng modules.
-+#
-+
-+obj-m += ltt-ring-buffer-client-discard.o
-+obj-m += ltt-ring-buffer-client-overwrite.o
-+obj-m += ltt-ring-buffer-metadata-client.o
-+obj-m += ltt-ring-buffer-client-mmap-discard.o
-+obj-m += ltt-ring-buffer-client-mmap-overwrite.o
-+obj-m += ltt-ring-buffer-metadata-mmap-client.o
-+
-+obj-m += ltt-relay.o
-+ltt-relay-objs := ltt-events.o ltt-debugfs-abi.o \
-+ ltt-probes.o ltt-context.o \
-+ lttng-context-pid.o lttng-context-procname.o \
-+ lttng-context-prio.o lttng-context-nice.o \
-+ lttng-context-vpid.o lttng-context-tid.o \
-+ lttng-context-vtid.o lttng-context-ppid.o \
-+ lttng-context-vppid.o lttng-calibrate.o
-+
-+ifneq ($(CONFIG_HAVE_SYSCALL_TRACEPOINTS),)
-+ltt-relay-objs += lttng-syscalls.o
-+endif
-+
-+ifneq ($(CONFIG_PERF_EVENTS),)
-+ltt-relay-objs += $(shell \
-+ if [ $(VERSION) -ge 3 \
-+ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 33 \) ] ; then \
-+ echo "lttng-context-perf-counters.o" ; fi;)
-+endif
-+
-+obj-m += probes/
-+obj-m += lib/
---
-1.7.9
-
diff --git a/patches.lttng/0018-staging-add-LTTng-to-build.patch b/patches.lttng/0018-staging-add-LTTng-to-build.patch
deleted file mode 100644
index defd04e6e55..00000000000
--- a/patches.lttng/0018-staging-add-LTTng-to-build.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 9b6d12198448aa51979b81aa68651cb49c0c5a02 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:26 -0500
-Subject: staging: add LTTng to build
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/Kconfig | 2 ++
- drivers/staging/Makefile | 1 +
- 2 files changed, 3 insertions(+)
-
---- a/drivers/staging/Kconfig
-+++ b/drivers/staging/Kconfig
-@@ -140,4 +140,6 @@ source "drivers/staging/netlogic/Kconfig
-
- source "drivers/staging/dwc2/Kconfig"
-
-+source "drivers/staging/lttng/Kconfig"
-+
- endif # STAGING
---- a/drivers/staging/Makefile
-+++ b/drivers/staging/Makefile
-@@ -62,3 +62,4 @@ obj-$(CONFIG_FIREWIRE_SERIAL) += fwseria
- obj-$(CONFIG_ZCACHE) += zcache/
- obj-$(CONFIG_GOLDFISH) += goldfish/
- obj-$(CONFIG_USB_DWC2) += dwc2/
-+obj-$(CONFIG_LTTNG) += lttng/
diff --git a/patches.lttng/0019-staging-Add-LTTng-entry-to-MAINTAINERS-file.patch b/patches.lttng/0019-staging-Add-LTTng-entry-to-MAINTAINERS-file.patch
deleted file mode 100644
index e4318eac042..00000000000
--- a/patches.lttng/0019-staging-Add-LTTng-entry-to-MAINTAINERS-file.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From 70bd4399bbdd4dd35697664af00fcd48cb2008a2 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 28 Nov 2011 07:42:27 -0500
-Subject: staging: Add LTTng entry to MAINTAINERS file
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- MAINTAINERS | 7 +++++++
- 1 file changed, 7 insertions(+)
-
---- a/MAINTAINERS
-+++ b/MAINTAINERS
-@@ -5079,6 +5079,13 @@ T: git git://github.com/linux-test-proje
- T: git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
- S: Maintained
-
-+LTTng (Linux Trace Toolkit Next Generation)
-+M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+L: lttng-dev@lists.lttng.org (moderated for non-subscribers)
-+W: http://lttng.org
-+S: Maintained
-+F: drivers/staging/lttng/
-+
- M32R ARCHITECTURE
- M: Hirokazu Takata <takata@linux-m32r.org>
- L: linux-m32r@ml.linux-m32r.org (moderated for non-subscribers)
diff --git a/patches.lttng/0069-lttng-lib-ring-buffer-remove-stale-null-pointer.patch b/patches.lttng/0069-lttng-lib-ring-buffer-remove-stale-null-pointer.patch
deleted file mode 100644
index bb268aca581..00000000000
--- a/patches.lttng/0069-lttng-lib-ring-buffer-remove-stale-null-pointer.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-From 2f8e0b31ad257bca9ff5dda9fdfdcc98d38d97f8 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Wed, 30 Nov 2011 13:34:14 -0500
-Subject: lttng lib: ring buffer: remove stale null-pointer
-
-* Dan Carpenter <dan.carpenter@oracle.com> wrote:
-[...]
-> The patch c844b2f5cfea: "lttng lib: ring buffer" from Nov 28, 2011,
-> leads to the following Smatch complaint:
->
-> drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c +1150
-> +lib_ring_buffer_print_buffer_errors()
-> warn: variable dereferenced before check 'chan' (see line 1143)
->
-> drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-> 1142 {
-> 1143 const struct lib_ring_buffer_config *config =
-> +chan->backend.config;
->
-> +^^^^^^^^^^^^^^^^^^^^
-> Dereference.
->
-> 1144 unsigned long write_offset, cons_offset;
-> 1145
-> 1146 /*
-> 1147 * Can be called in the error path of allocation when
-> 1148 * trans_channel_data is not yet set.
-> 1149 */
-> 1150 if (!chan)
-> ^^^^^^^^^
-> Check. At first glance the comment seems out of date, I think check can
-> be removed safely.
->
-> 1151 return;
-> 1152 /*
-
-Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- .../lttng/lib/ringbuffer/ring_buffer_frontend.c | 6 ------
- 1 files changed, 0 insertions(+), 6 deletions(-)
-
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-index 802f5cd..957d7f3 100644
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-@@ -1144,12 +1144,6 @@ void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
- unsigned long write_offset, cons_offset;
-
- /*
-- * Can be called in the error path of allocation when
-- * trans_channel_data is not yet set.
-- */
-- if (!chan)
-- return;
-- /*
- * No need to order commit_count, write_offset and cons_offset reads
- * because we execute at teardown when no more writer nor reader
- * references are left.
---
-1.7.9
-
diff --git a/patches.lttng/0070-lttng-lib-ring-buffer-remove-duplicate-null-pointer.patch b/patches.lttng/0070-lttng-lib-ring-buffer-remove-duplicate-null-pointer.patch
deleted file mode 100644
index dccc66032ad..00000000000
--- a/patches.lttng/0070-lttng-lib-ring-buffer-remove-duplicate-null-pointer.patch
+++ /dev/null
@@ -1,60 +0,0 @@
-From e5f7787388da7562b955a36b46e909e500a5974b Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Wed, 30 Nov 2011 13:34:15 -0500
-Subject: lttng lib: ring buffer remove duplicate null pointer
-
-* Dan Carpenter <dan.carpenter@oracle.com> wrote:
-> The patch c844b2f5cfea: "lttng lib: ring buffer" from Nov 28, 2011,
-> leads to the following Smatch complaint:
->
-> drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c +33
-> +lib_ring_buffer_fault()
-> warn: variable dereferenced before check 'buf' (see line 26)
->
-> drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-> 25 struct lib_ring_buffer *buf = vma->vm_private_data;
-> 26 struct channel *chan = buf->backend.chan;
-> ^^^^^^^^^^^^^^^^^
-> Dereference.
->
-> 27 const struct lib_ring_buffer_config *config = chan->backend.config;
-> 28 pgoff_t pgoff = vmf->pgoff;
-> 29 struct page **page;
-> 30 void **virt;
-> 31 unsigned long offset, sb_bindex;
-> 32
-> 33 if (!buf)
-> ^^^^
-> Check.
->
-> 34 return VM_FAULT_OOM;
-> 35
-
-This check is performed at mapping setup time in
-lib_ring_buffer_mmap_buf() already, so we can safely remove this
-duplicate.
-
-Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- .../lttng/lib/ringbuffer/ring_buffer_mmap.c | 3 ---
- 1 files changed, 0 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-index 68221ee..cf37434 100644
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-@@ -30,9 +30,6 @@ static int lib_ring_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vm
- void **virt;
- unsigned long offset, sb_bindex;
-
-- if (!buf)
-- return VM_FAULT_OOM;
--
- /*
- * Verify that faults are only done on the range of pages owned by the
- * reader.
---
-1.7.9
-
diff --git a/patches.lttng/0071-lttng-lib-ring-buffer-move-null-pointer-check-to-ope.patch b/patches.lttng/0071-lttng-lib-ring-buffer-move-null-pointer-check-to-ope.patch
deleted file mode 100644
index 6d6d98bbf79..00000000000
--- a/patches.lttng/0071-lttng-lib-ring-buffer-move-null-pointer-check-to-ope.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From eeb34e2113576aea782094d1e30f22b445355fe8 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Wed, 30 Nov 2011 13:34:16 -0500
-Subject: lttng lib: ring buffer move null pointer check to open
-
-* Dan Carpenter <dan.carpenter@oracle.com> wrote:
-> The patch c844b2f5cfea: "lttng lib: ring buffer" from Nov 28, 2011,
-> leads to the following Smatch complaint:
->
-> drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c +86
-> +lib_ring_buffer_mmap_buf()
-> warn: variable dereferenced before check 'buf' (see line 79)
->
-> drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-> 78 unsigned long length = vma->vm_end - vma->vm_start;
-> 79 struct channel *chan = buf->backend.chan;
-> ^^^^^^^^^^^^^^^^^
-> Dereference.
->
-> 80 const struct lib_ring_buffer_config *config = chan->backend.config;
-> 81 unsigned long mmap_buf_len;
-> 82
-> 83 if (config->output != RING_BUFFER_MMAP)
-> 84 return -EINVAL;
-> 85
-> 86 if (!buf)
-> ^^^^
-> Check.
->
-> 87 return -EBADF;
-> 88
-
-Let's move the NULL buf check to the file "open", where it belongs. The
-"open" file operation is the actual interface between lib ring buffer
-and the modules using it.
-
-Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- .../lttng/lib/ringbuffer/ring_buffer_mmap.c | 3 ---
- .../staging/lttng/lib/ringbuffer/ring_buffer_vfs.c | 3 +++
- 2 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-index cf37434..c9d6e89 100644
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-@@ -80,9 +80,6 @@ static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer *buf,
- if (config->output != RING_BUFFER_MMAP)
- return -EINVAL;
-
-- if (!buf)
-- return -EBADF;
--
- mmap_buf_len = chan->backend.buf_size;
- if (chan->backend.extra_reader_sb)
- mmap_buf_len += chan->backend.subbuf_size;
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
-index 1708ffd..8b78305 100644
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
-@@ -42,6 +42,9 @@ int lib_ring_buffer_open(struct inode *inode, struct file *file)
- struct lib_ring_buffer *buf = inode->i_private;
- int ret;
-
-+ if (!buf)
-+ return -EINVAL;
-+
- ret = lib_ring_buffer_open_read(buf);
- if (ret)
- return ret;
---
-1.7.9
-
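The three Smatch fixes above all target the same anti-pattern: the pointer is dereferenced in an initializer on the declaration line and only tested for NULL afterwards, so the test is either dead code or comes after the crash. The fixes either drop the stale check (ring_buffer_frontend.c and the ring_buffer_mmap.c fault handler) or hoist it to the open path where the pointer first enters the library. A minimal stand-alone illustration of why the flagged form is wrong; the structures and function here are invented for the example, not LTTng code.

#include <stdio.h>

struct backend { int chan; };
struct buffer  { struct backend backend; };

static int buffer_channel(struct buffer *buf)
{
        int chan = buf->backend.chan;   /* dereference happens here...            */

        if (!buf)                       /* ...so this test is dead code, or the   */
                return -1;              /* program already crashed on a NULL buf. */
        return chan;
}

int main(void)
{
        struct buffer b = { { 42 } };

        printf("%d\n", buffer_channel(&b));     /* prints 42 */
        return 0;
}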
diff --git a/patches.lttng/0072-lttng-wrapper-add-missing-include-to-kallsyms-wrappe.patch b/patches.lttng/0072-lttng-wrapper-add-missing-include-to-kallsyms-wrappe.patch
deleted file mode 100644
index 8f2d8fb21b1..00000000000
--- a/patches.lttng/0072-lttng-wrapper-add-missing-include-to-kallsyms-wrappe.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 91c0a213f10aa6637f680848e474fb107dd41ecf Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Wed, 30 Nov 2011 13:34:18 -0500
-Subject: lttng wrapper: add missing include to kallsyms wrapper
-
-Needed to keep bisectability.
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/wrapper/kallsyms.h | 2 ++
- 1 files changed, 2 insertions(+), 0 deletions(-)
-
-diff --git a/drivers/staging/lttng/wrapper/kallsyms.h b/drivers/staging/lttng/wrapper/kallsyms.h
-index bb45f38..a7b8ab1 100644
---- a/drivers/staging/lttng/wrapper/kallsyms.h
-+++ b/drivers/staging/lttng/wrapper/kallsyms.h
-@@ -1,6 +1,8 @@
- #ifndef _LTT_WRAPPER_KALLSYMS_H
- #define _LTT_WRAPPER_KALLSYMS_H
-
-+#include <linux/kallsyms.h>
-+
- /*
- * Copyright (C) 2011 Avik Sil (avik.sil@linaro.org)
- *
---
-1.7.9
-
diff --git a/patches.lttng/0073-staging-lttng-cleanup-one-bit-signed-bitfields.patch b/patches.lttng/0073-staging-lttng-cleanup-one-bit-signed-bitfields.patch
deleted file mode 100644
index c85c46619e1..00000000000
--- a/patches.lttng/0073-staging-lttng-cleanup-one-bit-signed-bitfields.patch
+++ /dev/null
@@ -1,209 +0,0 @@
-From 0dcbcbb49e3e8636e2f9d8cbcbeea827c5c951d9 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Thu, 1 Dec 2011 09:31:18 -0500
-Subject: staging: lttng: cleanup one-bit signed bitfields
-
-* Dan Carpenter <dan.carpenter@oracle.com> wrote:
-> Sparse complains that these signed bitfields look "dubious". The
-> problem is that instead of being either 0 or 1 like people would expect,
-> signed one bit variables like this are either 0 or -1. It doesn't cause
-> a problem in this case but it's ugly so lets fix them.
-
-* walter harms (wharms@bfs.de) wrote:
-> hi,
-> This patch looks ok to me but this design is ugly by itself.
-> It should be replaced by an uchar uint whatever or use a
-> real bool (obviously not preferred by this programmes).
-
-bool :1, uchar :1 or uint :1 could make sense. uchar:1/bool:1 won't save
-any space here, because the surrounding fields are either uint or
-pointers, so alignment will just add padding.
-
-I try to use int/uint whenever possible because x86 CPUs tend to get
-less register false-dependencies when using instructions modifying the
-whole register (generated by using int/uint types) rather than only part
-of it (uchar/char/bool). I only use char/uchar/bool when there is a
-clear wanted space gain.
-
-The reason why I never use the bool type within a structure when I want
-a compact representation is that bool takes a whole byte just to
-represent one bit:
-
-struct usebitfield {
- int a;
- unsigned int f:1, g:1, h:1, i:1, j:1;
- int b;
-};
-
-struct usebool {
- int a;
- bool f, g, h, i, j;
- int b;
-};
-
-struct useboolbf {
- int a;
- bool f:1, g:1, h:1, i:1, j:1;
- int b;
-};
-
-int main()
-{
- printf("bitfield %d bytes, bool %d bytes, boolbitfield %d bytes\n",
- sizeof(struct usebitfield), sizeof(struct usebool),
- sizeof(struct useboolbf));
-}
-
-result:
-
-bitfield 12 bytes, bool 16 bytes, boolbitfield 12 bytes
-
-This is because each bool takes one byte, while the bitfields are put in
-units of "unsigned int" (or bool for the 3rd struct). So in this
-example, we need 5 bytes + 3 bytes alignment for the bool, but only 4
-bytes to hold the "unsigned int" unit for the bitfields.
-
-The choice between bool and bitfields must also take into account the
-frequency of access to the variable, because bitfields require mask
-operations to access the selected bit(s). You will notice that none of
-these bitfields are accessed on the tracing fast-path: only in
-slow-paths. Therefore, space gain is more important than speed here.
-
-One might argue that I have so few of these fields here that it does not
-make an actual difference to go for bitfield or bool. I am just trying
-to choose types best suited for their intended purpose, ensuring they
-are future-proof and will allow simply adding more fields using the same
-type, as needed.
-
-So I guess I'll go for uint :1.
-
-Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Acked-by: Dan Carpenter <dan.carpenter@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- .../staging/lttng/lib/ringbuffer/backend_types.h | 4 ++--
- .../staging/lttng/lib/ringbuffer/frontend_types.h | 14 +++++++-------
- .../lttng/lib/ringbuffer/ring_buffer_frontend.c | 2 +-
- drivers/staging/lttng/ltt-events.h | 10 +++++-----
- 4 files changed, 15 insertions(+), 15 deletions(-)
-
-diff --git a/drivers/staging/lttng/lib/ringbuffer/backend_types.h b/drivers/staging/lttng/lib/ringbuffer/backend_types.h
-index 1d301de..25c41bc 100644
---- a/drivers/staging/lttng/lib/ringbuffer/backend_types.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/backend_types.h
-@@ -53,7 +53,7 @@ struct lib_ring_buffer_backend {
- struct channel *chan; /* Associated channel */
- int cpu; /* This buffer's cpu. -1 if global. */
- union v_atomic records_read; /* Number of records read */
-- unsigned int allocated:1; /* Bool: is buffer allocated ? */
-+ uint allocated:1; /* is buffer allocated ? */
- };
-
- struct channel_backend {
-@@ -65,7 +65,7 @@ struct channel_backend {
- * for writer.
- */
- unsigned int buf_size_order; /* Order of buffer size */
-- int extra_reader_sb:1; /* Bool: has extra reader subbuffer */
-+ uint extra_reader_sb:1; /* has extra reader subbuffer ? */
- struct lib_ring_buffer *buf; /* Channel per-cpu buffers */
-
- unsigned long num_subbuf; /* Number of sub-buffers for writer */
-diff --git a/drivers/staging/lttng/lib/ringbuffer/frontend_types.h b/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
-index 5c7437f..eced7be 100644
---- a/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
-@@ -60,8 +60,8 @@ struct channel {
- struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
- struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
- struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */
-- int cpu_hp_enable:1; /* Enable CPU hotplug notif. */
-- int hp_iter_enable:1; /* Enable hp iter notif. */
-+ uint cpu_hp_enable:1; /* Enable CPU hotplug notif. */
-+ uint hp_iter_enable:1; /* Enable hp iter notif. */
- wait_queue_head_t read_wait; /* reader wait queue */
- wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
- int finalized; /* Has channel been finalized */
-@@ -94,8 +94,8 @@ struct lib_ring_buffer_iter {
- ITER_NEXT_RECORD,
- ITER_PUT_SUBBUF,
- } state;
-- int allocated:1;
-- int read_open:1; /* Opened for reading ? */
-+ uint allocated:1;
-+ uint read_open:1; /* Opened for reading ? */
- };
-
- /* ring buffer state */
-@@ -138,9 +138,9 @@ struct lib_ring_buffer {
- unsigned long get_subbuf_consumed; /* Read-side consumed */
- unsigned long prod_snapshot; /* Producer count snapshot */
- unsigned long cons_snapshot; /* Consumer count snapshot */
-- int get_subbuf:1; /* Sub-buffer being held by reader */
-- int switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
-- int read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
-+ uint get_subbuf:1; /* Sub-buffer being held by reader */
-+ uint switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
-+ uint read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
- };
-
- static inline
-diff --git a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-index 957d7f3..348c05e 100644
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-@@ -54,7 +54,7 @@
- struct switch_offsets {
- unsigned long begin, end, old;
- size_t pre_header_padding, size;
-- unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
-+ uint switch_new_start:1, switch_new_end:1, switch_old_start:1,
- switch_old_end:1;
- };
-
-diff --git a/drivers/staging/lttng/ltt-events.h b/drivers/staging/lttng/ltt-events.h
-index 36b281a..c370ca6 100644
---- a/drivers/staging/lttng/ltt-events.h
-+++ b/drivers/staging/lttng/ltt-events.h
-@@ -67,8 +67,8 @@ struct lttng_enum_entry {
- struct lttng_integer_type {
- unsigned int size; /* in bits */
- unsigned short alignment; /* in bits */
-- unsigned int signedness:1;
-- unsigned int reverse_byte_order:1;
-+ uint signedness:1;
-+ uint reverse_byte_order:1;
- unsigned int base; /* 2, 8, 10, 16, for pretty print */
- enum lttng_string_encodings encoding;
- };
-@@ -191,7 +191,7 @@ struct ltt_event {
- } ftrace;
- } u;
- struct list_head list; /* Event list */
-- int metadata_dumped:1;
-+ uint metadata_dumped:1;
- };
-
- struct ltt_channel_ops {
-@@ -251,7 +251,7 @@ struct ltt_channel {
- struct ltt_event *sc_compat_unknown;
- struct ltt_event *sc_exit; /* for syscall exit */
- int header_type; /* 0: unset, 1: compact, 2: large */
-- int metadata_dumped:1;
-+ uint metadata_dumped:1;
- };
-
- struct ltt_session {
-@@ -264,7 +264,7 @@ struct ltt_session {
- struct list_head list; /* Session list */
- unsigned int free_chan_id; /* Next chan ID to allocate */
- uuid_le uuid; /* Trace session unique ID */
-- int metadata_dumped:1;
-+ uint metadata_dumped:1;
- };
-
- struct ltt_session *ltt_session_create(void);
---
-1.7.9
-
diff --git a/patches.lttng/0172-staging-lttng-Fix-recent-modifications-to-string_fro.patch b/patches.lttng/0172-staging-lttng-Fix-recent-modifications-to-string_fro.patch
deleted file mode 100644
index 9b0ccef7727..00000000000
--- a/patches.lttng/0172-staging-lttng-Fix-recent-modifications-to-string_fro.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From 643167423858052c26b2dfaf332c1ec0b472ea7a Mon Sep 17 00:00:00 2001
-From: Yannick Brosseau <yannick.brosseau@gmail.com>
-Date: Fri, 2 Dec 2011 21:13:32 -0500
-Subject: staging: lttng: Fix recent modifications to string_from_user
- operation
-
-Includes a fix for a recently introduced change: obviously max_t should
-be used instead of min_t here. Also, a likely should apply to the result
-of the comparison, not the variable per se.
-
-Signed-off-by: Yannick Brosseau <yannick.brosseau@gmail.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/probes/lttng-events.h | 4 ++--
- 1 files changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/staging/lttng/probes/lttng-events.h b/drivers/staging/lttng/probes/lttng-events.h
-index ff6273f..d486994 100644
---- a/drivers/staging/lttng/probes/lttng-events.h
-+++ b/drivers/staging/lttng/probes/lttng-events.h
-@@ -347,7 +347,7 @@ static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
- #undef __string_from_user
- #define __string_from_user(_item, _src) \
- __event_len += __dynamic_len[__dynamic_len_idx++] = \
-- min_t(size_t, strlen_user(_src), 1);
-+ max_t(size_t, strlen_user(_src), 1);
-
- #undef TP_PROTO
- #define TP_PROTO(args...) args
-@@ -557,7 +557,7 @@ __assign_##dest##_2: \
- (void) __typemap.dest; \
- lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest));\
- __ustrlen = __get_dynamic_array_len(dest); \
-- if (likely(__ustrlen) > 1) { \
-+ if (likely(__ustrlen > 1)) { \
- __chan->ops->event_write_from_user(&__ctx, src, \
- __ustrlen - 1); \
- } \
---
-1.7.9
-
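The second half of the fix above is easy to miss. With the kernel's usual definition of likely(x) as __builtin_expect(!!(x), 1), the value of !!(x) is always 0 or 1, so the original test "if (likely(__ustrlen) > 1)" can never be true and the user-space string body would never be copied into the event; moving the parenthesis, "likely(__ustrlen > 1)", tests the intended condition and keeps only the branch-prediction hint. A small user-space sketch of the difference, assuming that standard definition of likely():

#include <stdio.h>

#define likely(x) __builtin_expect(!!(x), 1)    /* usual kernel definition */

int main(void)
{
        unsigned long len = 42;                 /* stands in for __ustrlen */

        /* !!(42) is 1, and 1 > 1 is false: this branch can never be taken. */
        printf("likely(len) > 1 : %d\n", likely(len) > 1);

        /* The whole comparison goes through likely(): true for len = 42.   */
        printf("likely(len > 1) : %d\n", (int) likely(len > 1));
        return 0;
}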
diff --git a/patches.lttng/0173-staging-lttng-TODO-update-lttng-reported-to-work-fin.patch b/patches.lttng/0173-staging-lttng-TODO-update-lttng-reported-to-work-fin.patch
deleted file mode 100644
index e3a98e3f332..00000000000
--- a/patches.lttng/0173-staging-lttng-TODO-update-lttng-reported-to-work-fin.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 4ee778a44cc353ee201e933e45f493453cde3dfb Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Sat, 3 Dec 2011 13:05:37 -0500
-Subject: staging: lttng: TODO update: lttng reported to work fine on -rt now
-
-The patch "lttng: Fix recent modifications to string_from_user
-operation" has been confirmed to fix the corrupted trace problem
-experienced on -rt kernel by the original bug reporter. Remove the
-entry from the LTTng TODO list.
-
-Reported-by: Yannick Brosseau <yannick.brosseau@gmail.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/TODO | 6 ------
- 1 files changed, 0 insertions(+), 6 deletions(-)
-
-diff --git a/drivers/staging/lttng/TODO b/drivers/staging/lttng/TODO
-index 3fdc5e6..5e3a581 100644
---- a/drivers/staging/lttng/TODO
-+++ b/drivers/staging/lttng/TODO
-@@ -35,12 +35,6 @@ A) Cleanup/Testing
- depend on the producer to push the reader position.
- Contact: Julien Desfossez <julien.desfossez@polymtl.ca>
-
-- 4) Test latest -rt kernel support.
-- There has been report of corrupted traces when tracing a
-- 3.0.10-rt27 in the area of access_ok() system call event.
-- Still has to be investigated. Cannot be reproduced with
-- mainline kernel.
-- Contact: Yannick Brosseau <yannick.brosseau@polymtl.ca>
-
- B) Features
-
---
-1.7.9
-
diff --git a/patches.lttng/0174-staging-lttng-Update-max-symbol-length-to-256.patch b/patches.lttng/0174-staging-lttng-Update-max-symbol-length-to-256.patch
deleted file mode 100644
index 289b02bd8dc..00000000000
--- a/patches.lttng/0174-staging-lttng-Update-max-symbol-length-to-256.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 4588d6d4087bfcd3663e9dc26d0fffacb38c4df6 Mon Sep 17 00:00:00 2001
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Mon, 5 Dec 2011 20:07:35 -0500
-Subject: staging: lttng: Update max symbol length to 256
-
-The user-space tracer, along with the control tools, now support longer
-event name strings (up to 256 chars, including \0).
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
----
- drivers/staging/lttng/ltt-debugfs-abi.h | 2 +-
- 1 files changed, 1 insertions(+), 1 deletions(-)
-
-diff --git a/drivers/staging/lttng/ltt-debugfs-abi.h b/drivers/staging/lttng/ltt-debugfs-abi.h
-index 42bc9fd..a018297 100644
---- a/drivers/staging/lttng/ltt-debugfs-abi.h
-+++ b/drivers/staging/lttng/ltt-debugfs-abi.h
-@@ -13,7 +13,7 @@
-
- #include <linux/fs.h>
-
--#define LTTNG_SYM_NAME_LEN 128
-+#define LTTNG_SYM_NAME_LEN 256
-
- enum lttng_kernel_instrumentation {
- LTTNG_KERNEL_TRACEPOINT = 0,
---
-1.7.9
-
diff --git a/patches.lttng/lttng-2.3.4.patch b/patches.lttng/lttng-2.3.4.patch
new file mode 100644
index 00000000000..235ab9d59fc
--- /dev/null
+++ b/patches.lttng/lttng-2.3.4.patch
@@ -0,0 +1,69694 @@
+Subject: LTTng 2.3.4 release
+From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+
+This was taken from the git://git.lttng.org/lttng-modules.git stable-2.3
+branch, and turned into a patch for the 3.10.y kernel by Greg.
+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ MAINTAINERS | 7
+ drivers/staging/Kconfig | 2
+ drivers/staging/Makefile | 1
+ drivers/staging/lttng/Kconfig | 35
+ drivers/staging/lttng/LICENSE | 27
+ drivers/staging/lttng/Makefile | 38
+ drivers/staging/lttng/README | 93
+ drivers/staging/lttng/TODO | 117
+ drivers/staging/lttng/instrumentation/events/README | 27
+ drivers/staging/lttng/instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h | 285
+ drivers/staging/lttng/instrumentation/events/lttng-module/arch/x86/kvm/trace.h | 833 ++
+ drivers/staging/lttng/instrumentation/events/lttng-module/asoc.h | 422 +
+ drivers/staging/lttng/instrumentation/events/lttng-module/block.h | 878 ++
+ drivers/staging/lttng/instrumentation/events/lttng-module/btrfs.h | 1117 +++
+ drivers/staging/lttng/instrumentation/events/lttng-module/compaction.h | 74
+ drivers/staging/lttng/instrumentation/events/lttng-module/ext3.h | 902 ++
+ drivers/staging/lttng/instrumentation/events/lttng-module/ext4.h | 3130 ++++++++++
+ drivers/staging/lttng/instrumentation/events/lttng-module/gpio.h | 56
+ drivers/staging/lttng/instrumentation/events/lttng-module/irq.h | 220
+ drivers/staging/lttng/instrumentation/events/lttng-module/jbd.h | 268
+ drivers/staging/lttng/instrumentation/events/lttng-module/jbd2.h | 280
+ drivers/staging/lttng/instrumentation/events/lttng-module/kmem.h | 380 +
+ drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h | 356 +
+ drivers/staging/lttng/instrumentation/events/lttng-module/lock.h | 207
+ drivers/staging/lttng/instrumentation/events/lttng-module/lttng-statedump.h | 166
+ drivers/staging/lttng/instrumentation/events/lttng-module/module.h | 157
+ drivers/staging/lttng/instrumentation/events/lttng-module/napi.h | 38
+ drivers/staging/lttng/instrumentation/events/lttng-module/net.h | 105
+ drivers/staging/lttng/instrumentation/events/lttng-module/power.h | 351 +
+ drivers/staging/lttng/instrumentation/events/lttng-module/printk.h | 83
+ drivers/staging/lttng/instrumentation/events/lttng-module/random.h | 152
+ drivers/staging/lttng/instrumentation/events/lttng-module/rcu.h | 763 ++
+ drivers/staging/lttng/instrumentation/events/lttng-module/regmap.h | 188
+ drivers/staging/lttng/instrumentation/events/lttng-module/regulator.h | 141
+ drivers/staging/lttng/instrumentation/events/lttng-module/rpm.h | 101
+ drivers/staging/lttng/instrumentation/events/lttng-module/sched.h | 560 +
+ drivers/staging/lttng/instrumentation/events/lttng-module/scsi.h | 406 +
+ drivers/staging/lttng/instrumentation/events/lttng-module/signal.h | 202
+ drivers/staging/lttng/instrumentation/events/lttng-module/skb.h | 84
+ drivers/staging/lttng/instrumentation/events/lttng-module/sock.h | 68
+ drivers/staging/lttng/instrumentation/events/lttng-module/sunrpc.h | 177
+ drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h | 76
+ drivers/staging/lttng/instrumentation/events/lttng-module/timer.h | 336 +
+ drivers/staging/lttng/instrumentation/events/lttng-module/udp.h | 32
+ drivers/staging/lttng/instrumentation/events/lttng-module/vmscan.h | 594 +
+ drivers/staging/lttng/instrumentation/events/lttng-module/workqueue.h | 219
+ drivers/staging/lttng/instrumentation/events/lttng-module/writeback.h | 617 +
+ drivers/staging/lttng/instrumentation/events/mainline/arch/x86/kvm/mmutrace.h | 285
+ drivers/staging/lttng/instrumentation/events/mainline/arch/x86/kvm/trace.h | 828 ++
+ drivers/staging/lttng/instrumentation/events/mainline/asoc.h | 410 +
+ drivers/staging/lttng/instrumentation/events/mainline/block.h | 571 +
+ drivers/staging/lttng/instrumentation/events/mainline/btrfs.h | 918 ++
+ drivers/staging/lttng/instrumentation/events/mainline/compaction.h | 74
+ drivers/staging/lttng/instrumentation/events/mainline/ext3.h | 864 ++
+ drivers/staging/lttng/instrumentation/events/mainline/ext4.h | 2061 ++++++
+ drivers/staging/lttng/instrumentation/events/mainline/gpio.h | 56
+ drivers/staging/lttng/instrumentation/events/mainline/irq.h | 150
+ drivers/staging/lttng/instrumentation/events/mainline/jbd.h | 194
+ drivers/staging/lttng/instrumentation/events/mainline/jbd2.h | 262
+ drivers/staging/lttng/instrumentation/events/mainline/kmem.h | 308
+ drivers/staging/lttng/instrumentation/events/mainline/kvm.h | 312
+ drivers/staging/lttng/instrumentation/events/mainline/lock.h | 86
+ drivers/staging/lttng/instrumentation/events/mainline/module.h | 131
+ drivers/staging/lttng/instrumentation/events/mainline/napi.h | 38
+ drivers/staging/lttng/instrumentation/events/mainline/net.h | 84
+ drivers/staging/lttng/instrumentation/events/mainline/power.h | 275
+ drivers/staging/lttng/instrumentation/events/mainline/printk.h | 41
+ drivers/staging/lttng/instrumentation/events/mainline/random.h | 134
+ drivers/staging/lttng/instrumentation/events/mainline/rcu.h | 618 +
+ drivers/staging/lttng/instrumentation/events/mainline/regmap.h | 181
+ drivers/staging/lttng/instrumentation/events/mainline/regulator.h | 141
+ drivers/staging/lttng/instrumentation/events/mainline/rpm.h | 100
+ drivers/staging/lttng/instrumentation/events/mainline/sched.h | 432 +
+ drivers/staging/lttng/instrumentation/events/mainline/scsi.h | 365 +
+ drivers/staging/lttng/instrumentation/events/mainline/signal.h | 125
+ drivers/staging/lttng/instrumentation/events/mainline/skb.h | 75
+ drivers/staging/lttng/instrumentation/events/mainline/sock.h | 68
+ drivers/staging/lttng/instrumentation/events/mainline/sunrpc.h | 177
+ drivers/staging/lttng/instrumentation/events/mainline/syscalls.h | 75
+ drivers/staging/lttng/instrumentation/events/mainline/timer.h | 329 +
+ drivers/staging/lttng/instrumentation/events/mainline/udp.h | 32
+ drivers/staging/lttng/instrumentation/events/mainline/vmscan.h | 383 +
+ drivers/staging/lttng/instrumentation/events/mainline/workqueue.h | 121
+ drivers/staging/lttng/instrumentation/events/mainline/writeback.h | 492 +
+ drivers/staging/lttng/instrumentation/syscalls/3.0.34/powerpc-32-syscalls-3.0.34 | 286
+ drivers/staging/lttng/instrumentation/syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6 | 291
+ drivers/staging/lttng/instrumentation/syscalls/3.10.0-rc7/x86-64-syscalls-3.10.0-rc7 | 290
+ drivers/staging/lttng/instrumentation/syscalls/3.4.25/arm-32-syscalls-3.4.25 | 299
+ drivers/staging/lttng/instrumentation/syscalls/3.5.0/mips-32-syscalls-3.5.0 | 141
+ drivers/staging/lttng/instrumentation/syscalls/3.5.0/mips-64-syscalls-3.5.0 | 289
+ drivers/staging/lttng/instrumentation/syscalls/README | 18
+ drivers/staging/lttng/instrumentation/syscalls/headers/arm-32-syscalls-3.4.25_integers.h | 1181 +++
+ drivers/staging/lttng/instrumentation/syscalls/headers/arm-32-syscalls-3.4.25_integers_override.h | 52
+ drivers/staging/lttng/instrumentation/syscalls/headers/arm-32-syscalls-3.4.25_pointers.h | 2316 +++++++
+ drivers/staging/lttng/instrumentation/syscalls/headers/arm-32-syscalls-3.4.25_pointers_override.h | 39
+ drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_integers.h | 3
+ drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_pointers.h | 3
+ drivers/staging/lttng/instrumentation/syscalls/headers/mips-32-syscalls-3.5.0_integers.h | 677 ++
+ drivers/staging/lttng/instrumentation/syscalls/headers/mips-32-syscalls-3.5.0_integers_override.h | 3
+ drivers/staging/lttng/instrumentation/syscalls/headers/mips-32-syscalls-3.5.0_pointers.h | 984 +++
+ drivers/staging/lttng/instrumentation/syscalls/headers/mips-32-syscalls-3.5.0_pointers_override.h | 8
+ drivers/staging/lttng/instrumentation/syscalls/headers/mips-64-syscalls-3.5.0_integers.h | 1163 +++
+ drivers/staging/lttng/instrumentation/syscalls/headers/mips-64-syscalls-3.5.0_integers_override.h | 3
+ drivers/staging/lttng/instrumentation/syscalls/headers/mips-64-syscalls-3.5.0_pointers.h | 2232 +++++++
+ drivers/staging/lttng/instrumentation/syscalls/headers/mips-64-syscalls-3.5.0_pointers_override.h | 8
+ drivers/staging/lttng/instrumentation/syscalls/headers/powerpc-32-syscalls-3.0.34_integers.h | 1043 +++
+ drivers/staging/lttng/instrumentation/syscalls/headers/powerpc-32-syscalls-3.0.34_integers_override.h | 9
+ drivers/staging/lttng/instrumentation/syscalls/headers/powerpc-32-syscalls-3.0.34_pointers.h | 2316 +++++++
+ drivers/staging/lttng/instrumentation/syscalls/headers/powerpc-32-syscalls-3.0.34_pointers_override.h | 36
+ drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers.h | 15
+ drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers_override.h | 14
+ drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers.h | 15
+ drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h | 53
+ drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_unknown.h | 55
+ drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers.h | 1163 +++
+ drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers_override.h | 38
+ drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers.h | 2232 +++++++
+ drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h | 33
+ drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.10.0-rc7_integers.h | 1097 +++
+ drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.10.0-rc7_integers_override.h | 3
+ drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.10.0-rc7_pointers.h | 2304 +++++++
+ drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.10.0-rc7_pointers_override.h | 12
+ drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/Makefile | 1
+ drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c | 100
+ drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh | 279
+ drivers/staging/lttng/lib/Makefile | 11
+ drivers/staging/lttng/lib/align.h | 73
+ drivers/staging/lttng/lib/bitfield.h | 408 +
+ drivers/staging/lttng/lib/bug.h | 41
+ drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.c | 215
+ drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.h | 125
+ drivers/staging/lttng/lib/ringbuffer/api.h | 37
+ drivers/staging/lttng/lib/ringbuffer/backend.h | 272
+ drivers/staging/lttng/lib/ringbuffer/backend_internal.h | 461 +
+ drivers/staging/lttng/lib/ringbuffer/backend_types.h | 97
+ drivers/staging/lttng/lib/ringbuffer/config.h | 315 +
+ drivers/staging/lttng/lib/ringbuffer/frontend.h | 240
+ drivers/staging/lttng/lib/ringbuffer/frontend_api.h | 371 +
+ drivers/staging/lttng/lib/ringbuffer/frontend_internal.h | 456 +
+ drivers/staging/lttng/lib/ringbuffer/frontend_types.h | 188
+ drivers/staging/lttng/lib/ringbuffer/iterator.h | 83
+ drivers/staging/lttng/lib/ringbuffer/nohz.h | 42
+ drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c | 869 ++
+ drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c | 1830 +++++
+ drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c | 810 ++
+ drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c | 128
+ drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c | 227
+ drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c | 450 +
+ drivers/staging/lttng/lib/ringbuffer/vatomic.h | 97
+ drivers/staging/lttng/lib/ringbuffer/vfs.h | 150
+ drivers/staging/lttng/lttng-abi-old.h | 141
+ drivers/staging/lttng/lttng-abi.c | 1346 ++++
+ drivers/staging/lttng/lttng-abi.h | 177
+ drivers/staging/lttng/lttng-calibrate.c | 42
+ drivers/staging/lttng/lttng-context-hostname.c | 99
+ drivers/staging/lttng/lttng-context-nice.c | 81
+ drivers/staging/lttng/lttng-context-perf-counters.c | 285
+ drivers/staging/lttng/lttng-context-pid.c | 81
+ drivers/staging/lttng/lttng-context-ppid.c | 93
+ drivers/staging/lttng/lttng-context-prio.c | 102
+ drivers/staging/lttng/lttng-context-procname.c | 85
+ drivers/staging/lttng/lttng-context-tid.c | 81
+ drivers/staging/lttng/lttng-context-vpid.c | 87
+ drivers/staging/lttng/lttng-context-vppid.c | 102
+ drivers/staging/lttng/lttng-context-vtid.c | 87
+ drivers/staging/lttng/lttng-context.c | 105
+ drivers/staging/lttng/lttng-endian.h | 43
+ drivers/staging/lttng/lttng-events.c | 1260 ++++
+ drivers/staging/lttng/lttng-events.h | 507 +
+ drivers/staging/lttng/lttng-kernel-version.h | 36
+ drivers/staging/lttng/lttng-probes.c | 171
+ drivers/staging/lttng/lttng-ring-buffer-client-discard.c | 33
+ drivers/staging/lttng/lttng-ring-buffer-client-mmap-discard.c | 33
+ drivers/staging/lttng/lttng-ring-buffer-client-mmap-overwrite.c | 33
+ drivers/staging/lttng/lttng-ring-buffer-client-overwrite.c | 33
+ drivers/staging/lttng/lttng-ring-buffer-client.h | 600 +
+ drivers/staging/lttng/lttng-ring-buffer-metadata-client.c | 33
+ drivers/staging/lttng/lttng-ring-buffer-metadata-client.h | 342 +
+ drivers/staging/lttng/lttng-ring-buffer-metadata-mmap-client.c | 33
+ drivers/staging/lttng/lttng-statedump-impl.c | 427 +
+ drivers/staging/lttng/lttng-syscalls.c | 459 +
+ drivers/staging/lttng/lttng-tracer-core.h | 41
+ drivers/staging/lttng/lttng-tracer.h | 81
+ drivers/staging/lttng/probes/Makefile | 240
+ drivers/staging/lttng/probes/define_trace.h | 180
+ drivers/staging/lttng/probes/lttng-events-reset.h | 99
+ drivers/staging/lttng/probes/lttng-events.h | 868 ++
+ drivers/staging/lttng/probes/lttng-ftrace.c | 201
+ drivers/staging/lttng/probes/lttng-kprobes.c | 177
+ drivers/staging/lttng/probes/lttng-kretprobes.c | 290
+ drivers/staging/lttng/probes/lttng-probe-asoc.c | 45
+ drivers/staging/lttng/probes/lttng-probe-block.c | 45
+ drivers/staging/lttng/probes/lttng-probe-btrfs.c | 48
+ drivers/staging/lttng/probes/lttng-probe-compaction.c | 43
+ drivers/staging/lttng/probes/lttng-probe-ext3.c | 52
+ drivers/staging/lttng/probes/lttng-probe-ext4.c | 51
+ drivers/staging/lttng/probes/lttng-probe-gpio.c | 43
+ drivers/staging/lttng/probes/lttng-probe-irq.c | 45
+ drivers/staging/lttng/probes/lttng-probe-jbd.c | 43
+ drivers/staging/lttng/probes/lttng-probe-jbd2.c | 45
+ drivers/staging/lttng/probes/lttng-probe-kmem.c | 45
+ drivers/staging/lttng/probes/lttng-probe-kvm-x86-mmu.c | 43
+ drivers/staging/lttng/probes/lttng-probe-kvm-x86.c | 45
+ drivers/staging/lttng/probes/lttng-probe-kvm.c | 45
+ drivers/staging/lttng/probes/lttng-probe-lock.c | 50
+ drivers/staging/lttng/probes/lttng-probe-module.c | 45
+ drivers/staging/lttng/probes/lttng-probe-napi.c | 45
+ drivers/staging/lttng/probes/lttng-probe-net.c | 43
+ drivers/staging/lttng/probes/lttng-probe-power.c | 45
+ drivers/staging/lttng/probes/lttng-probe-printk.c | 43
+ drivers/staging/lttng/probes/lttng-probe-random.c | 43
+ drivers/staging/lttng/probes/lttng-probe-rcu.c | 44
+ drivers/staging/lttng/probes/lttng-probe-regmap.c | 44
+ drivers/staging/lttng/probes/lttng-probe-regulator.c | 43
+ drivers/staging/lttng/probes/lttng-probe-rpm.c | 44
+ drivers/staging/lttng/probes/lttng-probe-sched.c | 44
+ drivers/staging/lttng/probes/lttng-probe-scsi.c | 44
+ drivers/staging/lttng/probes/lttng-probe-signal.c | 42
+ drivers/staging/lttng/probes/lttng-probe-skb.c | 45
+ drivers/staging/lttng/probes/lttng-probe-sock.c | 43
+ drivers/staging/lttng/probes/lttng-probe-statedump.c | 46
+ drivers/staging/lttng/probes/lttng-probe-sunrpc.c | 43
+ drivers/staging/lttng/probes/lttng-probe-timer.c | 46
+ drivers/staging/lttng/probes/lttng-probe-udp.c | 43
+ drivers/staging/lttng/probes/lttng-probe-user.c | 54
+ drivers/staging/lttng/probes/lttng-probe-user.h | 30
+ drivers/staging/lttng/probes/lttng-probe-vmscan.c | 45
+ drivers/staging/lttng/probes/lttng-probe-workqueue.c | 49
+ drivers/staging/lttng/probes/lttng-probe-writeback.c | 54
+ drivers/staging/lttng/probes/lttng-type-list.h | 33
+ drivers/staging/lttng/probes/lttng-types.c | 61
+ drivers/staging/lttng/probes/lttng-types.h | 84
+ drivers/staging/lttng/probes/lttng.h | 27
+ drivers/staging/lttng/wrapper/compiler.h | 42
+ drivers/staging/lttng/wrapper/fdtable.c | 57
+ drivers/staging/lttng/wrapper/fdtable.h | 44
+ drivers/staging/lttng/wrapper/ftrace.h | 84
+ drivers/staging/lttng/wrapper/inline_memcpy.h | 23
+ drivers/staging/lttng/wrapper/irq.h | 38
+ drivers/staging/lttng/wrapper/irqdesc.c | 58
+ drivers/staging/lttng/wrapper/irqdesc.h | 33
+ drivers/staging/lttng/wrapper/kallsyms.h | 51
+ drivers/staging/lttng/wrapper/nsproxy.h | 42
+ drivers/staging/lttng/wrapper/perf.h | 71
+ drivers/staging/lttng/wrapper/poll.h | 33
+ drivers/staging/lttng/wrapper/random.c | 77
+ drivers/staging/lttng/wrapper/random.h | 32
+ drivers/staging/lttng/wrapper/ringbuffer/api.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/backend.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/backend_internal.h | 2
+ drivers/staging/lttng/wrapper/ringbuffer/backend_types.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/config.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/frontend.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/frontend_api.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/frontend_internal.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/frontend_types.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/iterator.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/nohz.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/vatomic.h | 1
+ drivers/staging/lttng/wrapper/ringbuffer/vfs.h | 1
+ drivers/staging/lttng/wrapper/spinlock.h | 47
+ drivers/staging/lttng/wrapper/splice.c | 60
+ drivers/staging/lttng/wrapper/splice.h | 37
+ drivers/staging/lttng/wrapper/trace-clock.h | 102
+ drivers/staging/lttng/wrapper/tracepoint.h | 44
+ drivers/staging/lttng/wrapper/uuid.h | 43
+ drivers/staging/lttng/wrapper/vmalloc.h | 63
+ drivers/staging/lttng/wrapper/writeback.h | 61
+ 268 files changed, 68597 insertions(+)
+
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5079,6 +5079,13 @@ T: git git://github.com/linux-test-proje
+ T: git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
+ S: Maintained
+
++LTTng (Linux Trace Toolkit Next Generation)
++M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++L: lttng-dev@lists.lttng.org (moderated for non-subscribers)
++W: http://lttng.org
++S: Maintained
++F: drivers/staging/lttng/
++
+ M32R ARCHITECTURE
+ M: Hirokazu Takata <takata@linux-m32r.org>
+ L: linux-m32r@ml.linux-m32r.org (moderated for non-subscribers)
+--- a/drivers/staging/Kconfig
++++ b/drivers/staging/Kconfig
+@@ -140,4 +140,6 @@ source "drivers/staging/netlogic/Kconfig
+
+ source "drivers/staging/dwc2/Kconfig"
+
++source "drivers/staging/lttng/Kconfig"
++
+ endif # STAGING
+--- a/drivers/staging/Makefile
++++ b/drivers/staging/Makefile
+@@ -62,3 +62,4 @@ obj-$(CONFIG_FIREWIRE_SERIAL) += fwseria
+ obj-$(CONFIG_ZCACHE) += zcache/
+ obj-$(CONFIG_GOLDFISH) += goldfish/
+ obj-$(CONFIG_USB_DWC2) += dwc2/
++obj-$(CONFIG_LTTNG) += lttng/
+--- /dev/null
++++ b/drivers/staging/lttng/Kconfig
+@@ -0,0 +1,35 @@
++config LTTNG
++ tristate "LTTng kernel tracer"
++ depends on TRACEPOINTS
++ help
++ The LTTng 2.0 Tracer Toolchain allows integrated kernel and
++ user-space tracing from a single user interface: the "lttng"
++	  command. See the http://lttng.org website for the "lttng-tools"
++ user-space tracer control tools package and the "babeltrace"
++ package for conversion of trace data to a human-readable
++ format.
++
++ LTTng features:
++ - System-wide tracing across kernel, libraries and
++ applications,
++ - Tracepoints, detailed syscall tracing (fast strace replacement),
++ Function tracer, CPU Performance Monitoring Unit (PMU) counters
++ and kprobes support,
++	  - Ability to attach "context" information to events in the
++ trace (e.g. any PMU counter, pid, ppid, tid, comm name, etc). All
++ the extra information fields to be collected with events are
++ optional, specified on a per-tracing-session basis (except for
++ timestamp and event id, which are mandatory).
++ - Precise and fast clock sources with near cycle-level
++ timestamps,
++ - Efficient trace data transport:
++ - Compact Binary format with CTF,
++	    - Per-core buffers ensure scalability,
++ - Fast-paths in caller context, amortized synchronization,
++ - Zero-copy using splice and mmap system calls, over disk,
++ network or consumed in-place,
++ - Multiple concurrent tracing sessions are supported,
++ - Designed to meet hard real-time constraints,
++ - Supports live streaming of the trace data,
++ - Produces CTF (Common Trace Format) natively (see
++ http://www.efficios.com/ctf).
+--- /dev/null
++++ b/drivers/staging/lttng/LICENSE
+@@ -0,0 +1,27 @@
++LTTng modules licensing
++Mathieu Desnoyers
++June 2, 2011
++
++* LGPLv2.1/GPLv2 dual-license
++
++The files contained within this package are licensed under
++LGPLv2.1/GPLv2 dual-license (see lgpl-2.1.txt and gpl-2.0.txt for
++details), except for files identified by the following sections.
++
++* GPLv2 license
++
++These files are licensed exclusively under the GPLv2 license. See
++gpl-2.0.txt for details.
++
++lib/ringbuffer/ring_buffer_splice.c
++lib/ringbuffer/ring_buffer_mmap.c
++instrumentation/events/mainline/*.h
++instrumentation/events/lttng-modules/*.h
++
++* MIT-style license
++
++These files are licensed under an MIT-style license:
++
++lib/prio_heap/lttng_prio_heap.h
++lib/prio_heap/lttng_prio_heap.c
++lib/bitfield.h
+--- /dev/null
++++ b/drivers/staging/lttng/Makefile
+@@ -0,0 +1,38 @@
++#
++# Makefile for the LTTng modules.
++#
++
++obj-m += lttng-ring-buffer-client-discard.o
++obj-m += lttng-ring-buffer-client-overwrite.o
++obj-m += lttng-ring-buffer-metadata-client.o
++obj-m += lttng-ring-buffer-client-mmap-discard.o
++obj-m += lttng-ring-buffer-client-mmap-overwrite.o
++obj-m += lttng-ring-buffer-metadata-mmap-client.o
++
++obj-m += lttng-tracer.o
++lttng-tracer-objs := lttng-events.o lttng-abi.o \
++ lttng-probes.o lttng-context.o \
++ lttng-context-pid.o lttng-context-procname.o \
++ lttng-context-prio.o lttng-context-nice.o \
++ lttng-context-vpid.o lttng-context-tid.o \
++ lttng-context-vtid.o lttng-context-ppid.o \
++ lttng-context-vppid.o lttng-calibrate.o \
++ lttng-context-hostname.o wrapper/random.o
++
++obj-m += lttng-statedump.o
++lttng-statedump-objs := lttng-statedump-impl.o wrapper/irqdesc.o \
++ wrapper/fdtable.o
++
++ifneq ($(CONFIG_HAVE_SYSCALL_TRACEPOINTS),)
++lttng-tracer-objs += lttng-syscalls.o probes/lttng-probe-user.o
++endif
++
++ifneq ($(CONFIG_PERF_EVENTS),)
++lttng-tracer-objs += $(shell \
++ if [ $(VERSION) -ge 3 \
++ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 33 \) ] ; then \
++ echo "lttng-context-perf-counters.o" ; fi;)
++endif
++
++obj-m += probes/
++obj-m += lib/
+--- /dev/null
++++ b/drivers/staging/lttng/README
+@@ -0,0 +1,93 @@
++LTTng 2.x modules
++
++Mathieu Desnoyers
++March 29, 2013
++
++LTTng 2.x kernel modules build against a vanilla or distribution kernel, without
++the need for additional patches. Other features:
++
++- Produces CTF (Common Trace Format) natively,
++ (http://www.efficios.com/ctf)
++- Tracepoints, Function tracer, CPU Performance Monitoring Unit (PMU)
++ counters, kprobes, and kretprobes support,
++- Integrated interface for both kernel and userspace tracing,
++- Ability to attach "context" information to events in the
++ trace (e.g. any PMU counter, pid, ppid, tid, comm name, etc).
++ All the extra information fields to be collected with events are
++ optional, specified on a per-tracing-session basis (except for
++ timestamp and event id, which are mandatory).
++
++To build and install, you will need to have your kernel headers available (or
++access to your full kernel source tree), and use:
++
++% make
++# make modules_install
++# depmod -a
++
++If you need to specify the target directory to the kernel you want to build
++against, use:
++
++% KERNELDIR=path_to_kernel_dir make
++# KERNELDIR=path_to_kernel_dir make modules_install
++# depmod -a kernel_version
++
++Use lttng-tools to control the tracer. LTTng tools should automatically load
++the kernel modules when needed. Use Babeltrace to print traces as a
++human-readable text log. These tools are available at the following URL:
++http://lttng.org/lttng2.0
++
++So far, it has been tested on vanilla Linux kernels 2.6.38, 2.6.39, 3.0,
++3.1, 3.2, 3.3 (on x86 32/64-bit, and powerpc 32-bit at the moment, build
++tested on ARM), 3.4, 3.5, 3.8, 3.9-rc on x86 64-bit. Kernels 2.6.32 to
++2.6.34 need up to 3 patches applied (refer to linux-patches within the
++lttng-modules tree). It should work fine with newer kernels and other
++architectures, but expect build issues with kernels older than 2.6.36.
++The clock source currently used is the standard gettimeofday (slower,
++less scalable and less precise than the LTTng 0.x clocks). Support for
++LTTng 0.x clocks will be added back soon into LTTng 2.0.
++
++
++* Kernel config options required
++
++CONFIG_MODULES: required
++ * Kernel modules support.
++CONFIG_KALLSYMS: required
++ * See wrapper/ files. This is necessary until the few required missing
++ symbols are exported to GPL modules from mainline.
++CONFIG_HIGH_RES_TIMERS: required
++ * Needed for LTTng 2.0 clock source.
++CONFIG_TRACEPOINTS: required
++ kernel tracepoint instrumentation
++	* Enabled as a side-effect of any of the perf/ftrace/blktrace
++ instrumentation features.
++
++
++* Kernel config options supported (optional)
++
++The following kernel configuration options will affect the features
++available from LTTng:
++
++
++CONFIG_HAVE_SYSCALL_TRACEPOINTS:
++ system call tracing
++ lttng enable-event -k --syscall
++ lttng enable-event -k -a
++CONFIG_PERF_EVENTS:
++ performance counters
++ lttng add-context -t perf:*
++CONFIG_EVENT_TRACING:
++ needed to allow block layer tracing
++CONFIG_KPROBES:
++ Dynamic probe.
++ lttng enable-event -k --probe ...
++CONFIG_KRETPROBES:
++ Dynamic function entry/return probe.
++ lttng enable-event -k --function ...
++
++
++* Note about Perf PMU counters support
++
++Each PMU counter has its zero value set when it is attached to a context with
++add-context. It is therefore normal for the same counter, attached to both the
++stream context and the event context, to show different values for a given
++event; what matters is that they increment at the same rate.
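A side note on the CONFIG_KALLSYMS requirement above: the wrapper/ files resolve
a few symbols that mainline does not export to GPL modules by looking them up at
runtime through kallsyms. The following is only a minimal sketch of that pattern
with a placeholder symbol name; the real helpers live in wrapper/kallsyms.h and
the other wrapper/ files in this patch.

/*
 * Sketch only: "some_unexported_symbol" is a placeholder, not a real
 * kernel symbol.  CONFIG_KALLSYMS makes kallsyms_lookup_name() available,
 * which is how the wrapper/ files reach symbols that are not exported
 * to modules.
 */
#include <linux/kallsyms.h>

typedef void (*unexported_fn_t)(void);

static unexported_fn_t resolve_unexported(void)
{
	return (unexported_fn_t) kallsyms_lookup_name("some_unexported_symbol");
}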
+--- /dev/null
++++ b/drivers/staging/lttng/TODO
+@@ -0,0 +1,117 @@
++Please contact Mathieu Desnoyers <mathieu.desnoyers@efficios.com> for
++questions about this TODO list. The "Cleanup/Testing" section would be
++good to go through before integration into mainline. The "Features"
++section is a wish list of features to complete before releasing the
++"LTTng 2.0" final version, but are not required to have LTTng working.
++These features are mostly performance enhancements and instrumentation
++enhancements.
++
++TODO:
++
++A) Cleanup/Testing
++
++ 1) Test lib ring buffer snapshot feature.
++ When working on the lttngtop project, Julien Desfossez
++ reported that he needed to push the consumer position
++	   forward explicitly with lib_ring_buffer_put_next_subbuf.
++	   This means that although the usual case of pairs of
++	   lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf
++	   works fine, there is probably a problem that needs to be
++ investigated in
++ lib_ring_buffer_get_subbuf/lib_ring_buffer_put_subbuf, which
++ depend on the producer to push the reader position.
++ Contact: Julien Desfossez <julien.desfossez@polymtl.ca>
++
++
++B) Features
++
++ 1) Integration of the LTTng 0.x trace clocks into
++ LTTng 2.0.
++ Currently using mainline kernel monotonic clock. NMIs can
++ therefore not be traced, and this causes a significant
++ performance degradation compared to the LTTng 0.x trace
++	   clocks. This implies the creation of drivers/staging/lttng/arch to
++ contain the arch-specific clock support files.
++ * Dependency: addition of clock descriptions to CTF.
++ See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
++ for the LTTng 0.x git tree.
++
++ 2) Port OMAP3 LTTng trace clocks to x86 to support systems
++ without constant TSC.
++ * Dependency: (B.1)
++ See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
++ for the LTTng 0.x git tree.
++
++ 3) Implement mmap operation on an anonymous file created by a
++	   LTTNG_KERNEL_CLOCK ioctl, in order to export
++	   synchronized kernel and user-level LTTng trace clocks,
++ with:
++ - shared per-cpu data,
++ - read seqlock.
++ The content exported by this shared memory area will be
++ arch-specific.
++ * Dependency: (B.1) && (B.2)
++ See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
++ for the LTTng 0.x git tree, which has vDSO support for
++ LTTng trace clock on the x86 architecture.
++
++	4) Integrate the "statedump" module from LTTng 0.x into LTTng
++ 2.0.
++ See: http://git.lttng.org/?p=lttng-modules.git;a=shortlog;h=refs/heads/v0.19-stable
++ ltt-statedump.c
++
++	5) Generate system call TRACE_EVENT headers for all
++ architectures (currently done: x86 32/64).
++
++	6) Define "unknown" system calls in instrumentation/syscalls
++	   override files, or do SYSCALL_DEFINE improvements to the
++	   mainline kernel to allow automatic generation of these
++	   missing system call descriptions.
++
++	7) Create missing tracepoint event header files in
++	   instrumentation/events from headers located in
++ include/trace/events/. Choice: either do as currently done,
++ and copy those headers locally into the lttng driver and
++ perform the modifications locally, or push TRACE_EVENT API
++ modification into mainline headers, which would require
++ collaboration from Ftrace/Perf maintainers.
++
++	8) Poll: implement a poll and/or epoll exclusive wakeup scheme,
++	   which contradicts POSIX, but protects multiple consumer
++	   threads from the thundering herd effect.
++
++	9) Re-integrate sample modules from libringbuffer into
++	   the lttng driver. Those modules can be used as examples of
++	   how to use libringbuffer in contexts other than LTTng, and
++	   are useful for benchmarking the ringbuffer library.
++ See: http://www.efficios.com/ringbuffer
++
++	10) NOHZ support for lib ring buffer. The NOHZ infrastructure in
++	   the Linux kernel does not support notifier chains, which does
++	   not let LTTng play nicely with low-power-consumption setups
++	   for flight recorder (overwrite mode) live traces. One way to
++	   allow integration between NOHZ and LTTng would be to add
++	   support for such notifiers to the NOHZ kernel infrastructure.
++
++	11) Turn the lttng-probes.c probe_list into a hash table. This
++	   turns the O(n^2) registration cost for n trace systems into
++	   O(n), i.e. O(1) per system (see the sketch after this TODO).
++
++	12) drivers/staging/lttng/probes/lttng-ftrace.c:
++ LTTng currently uses kretprobes for per-function tracing,
++ not the function tracer. So lttng-ftrace.c should be used
++ for "all" function tracing.
++
++	13) drivers/staging/lttng/probes/lttng-types.c:
++	   This is a currently unused placeholder to export entire C
++	   type declarations into the trace metadata, e.g. to support
++	   describing the layout of structures and enumeration mappings
++	   along with syscall entry events. The design of this support
++	   will likely change, though, and become integrated with the
++	   TRACE_EVENT support within lttng, by adding new macros and
++	   support for generating metadata from those macros, to
++	   allow describing such compound types/enumerations.
++
++Please send patches
++To: Greg Kroah-Hartman <greg@kroah.com>
++To: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
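The direction item 11 describes could look roughly like the sketch below, built
on the stock <linux/hashtable.h> API. This is illustrative only and not part of
the patch: struct probe_entry and the helper names are hypothetical placeholders;
the real lttng-probes.c types and its existing locking would be reused instead.

/*
 * Direction sketch for the probe_list hash table item -- not part of
 * this patch.  All names below are placeholders.
 */
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/string.h>

#define PROBE_HASH_BITS	6

static DEFINE_HASHTABLE(probe_hash, PROBE_HASH_BITS);

struct probe_entry {
	const char *name;
	struct hlist_node node;
};

static u32 probe_name_hash(const char *name)
{
	return jhash(name, strlen(name), 0);
}

static void probe_hash_add(struct probe_entry *e)
{
	hash_add(probe_hash, &e->node, probe_name_hash(e->name));
}

static struct probe_entry *probe_hash_find(const char *name)
{
	struct probe_entry *e;

	/* O(1) on average instead of a full list walk. */
	hash_for_each_possible(probe_hash, e, node, probe_name_hash(name))
		if (!strcmp(e->name, name))
			return e;
	return NULL;
}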
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/README
+@@ -0,0 +1,27 @@
++* Workflow for updating patches from newer kernel:
++
++Diff mainline/ and lttng-module/ directories.
++
++Pull the new headers from mainline kernel to mainline/.
++Copy them into lttng-modules.
++Apply diff. Fix conflicts.
++
++
++* Workflow to add new Tracepoint instrumentation to newer kernel,
++ and add support for it into LTTng:
++
++a) instrument the kernel with new trace event headers. If you want that
++ instrumentation distributed, you will have to push those changes into
++ the upstream Linux kernel first,
++b) copy those headers into lttng mainline/ directory,
++c) look at a diff from other headers between mainline/ and
++ lttng/, and use that as a recipe to create a new lttng/
++ header from the mainline/ header,
++d) create a new file in probes/ for the new trace event header you added
++   (a minimal sketch follows this README),
++e) add it to probes/Makefile,
++f) build, make modules_install,
++g) don't forget to load that new module too.
++
++The current LTTng policy is to only accept headers derived from trace
++event headers that have been accepted into the upstream Linux kernel,
++for tracepoints related to upstream kernel instrumentation.
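To make step d) above concrete, a probe stub is only a few lines of boilerplate.
The sketch below is hypothetical ("foo" is a placeholder header name) and is
modeled on the existing probes/lttng-probe-*.c files in this patch, which should
be copied as the authoritative starting point rather than this sketch. Step e)
then adds the corresponding obj-m += lttng-probe-foo.o line to probes/Makefile,
following the pattern already used there.

/* Hypothetical probes/lttng-probe-foo.c -- "foo" is a placeholder. */
#include <linux/module.h>
#include "../lttng-tracer.h"

/*
 * Build the LTTng version of the probes from the lttng-module copy of
 * the instrumentation header rather than the mainline copy.
 */
#define LTTNG_PACKAGE_BUILD
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module

#include "../instrumentation/events/lttng-module/foo.h"

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Your Name <you@example.com>");
MODULE_DESCRIPTION("LTTng foo probes");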
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
+@@ -0,0 +1,285 @@
++#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_KVMMMU_H
++
++#include <linux/tracepoint.h>
++#include <linux/ftrace_event.h>
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM kvmmmu
++
++#define KVM_MMU_PAGE_FIELDS \
++ __field(__u64, gfn) \
++ __field(__u32, role) \
++ __field(__u32, root_count) \
++ __field(bool, unsync)
++
++#define KVM_MMU_PAGE_ASSIGN(sp) \
++ tp_assign(gfn, sp->gfn) \
++ tp_assign(role, sp->role.word) \
++ tp_assign(root_count, sp->root_count) \
++ tp_assign(unsync, sp->unsync)
++
++#define KVM_MMU_PAGE_PRINTK() ({ \
++ const char *ret = p->buffer + p->len; \
++ static const char *access_str[] = { \
++ "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
++ }; \
++ union kvm_mmu_page_role role; \
++ \
++ role.word = __entry->role; \
++ \
++ trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s" \
++ " %snxe root %u %s%c", \
++ __entry->gfn, role.level, \
++ role.cr4_pae ? " pae" : "", \
++ role.quadrant, \
++ role.direct ? " direct" : "", \
++ access_str[role.access], \
++ role.invalid ? " invalid" : "", \
++ role.nxe ? "" : "!", \
++ __entry->root_count, \
++ __entry->unsync ? "unsync" : "sync", 0); \
++ ret; \
++ })
++
++#define kvm_mmu_trace_pferr_flags \
++ { PFERR_PRESENT_MASK, "P" }, \
++ { PFERR_WRITE_MASK, "W" }, \
++ { PFERR_USER_MASK, "U" }, \
++ { PFERR_RSVD_MASK, "RSVD" }, \
++ { PFERR_FETCH_MASK, "F" }
++
++/*
++ * A pagetable walk has started
++ */
++TRACE_EVENT(
++ kvm_mmu_pagetable_walk,
++ TP_PROTO(u64 addr, u32 pferr),
++ TP_ARGS(addr, pferr),
++
++ TP_STRUCT__entry(
++ __field(__u64, addr)
++ __field(__u32, pferr)
++ ),
++
++ TP_fast_assign(
++ tp_assign(addr, addr)
++ tp_assign(pferr, pferr)
++ ),
++
++ TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
++ __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
++)
++
++
++/* We just walked a paging element */
++TRACE_EVENT(
++ kvm_mmu_paging_element,
++ TP_PROTO(u64 pte, int level),
++ TP_ARGS(pte, level),
++
++ TP_STRUCT__entry(
++ __field(__u64, pte)
++ __field(__u32, level)
++ ),
++
++ TP_fast_assign(
++ tp_assign(pte, pte)
++ tp_assign(level, level)
++ ),
++
++ TP_printk("pte %llx level %u", __entry->pte, __entry->level)
++)
++
++DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,
++
++ TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
++
++ TP_ARGS(table_gfn, index, size),
++
++ TP_STRUCT__entry(
++ __field(__u64, gpa)
++ ),
++
++ TP_fast_assign(
++ tp_assign(gpa, ((u64)table_gfn << PAGE_SHIFT)
++ + index * size)
++ ),
++
++ TP_printk("gpa %llx", __entry->gpa)
++)
++
++/* We set a pte accessed bit */
++DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,
++
++ TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
++
++ TP_ARGS(table_gfn, index, size)
++)
++
++/* We set a pte dirty bit */
++DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,
++
++ TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
++
++ TP_ARGS(table_gfn, index, size)
++)
++
++TRACE_EVENT(
++ kvm_mmu_walker_error,
++ TP_PROTO(u32 pferr),
++ TP_ARGS(pferr),
++
++ TP_STRUCT__entry(
++ __field(__u32, pferr)
++ ),
++
++ TP_fast_assign(
++ tp_assign(pferr, pferr)
++ ),
++
++ TP_printk("pferr %x %s", __entry->pferr,
++ __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
++)
++
++TRACE_EVENT(
++ kvm_mmu_get_page,
++ TP_PROTO(struct kvm_mmu_page *sp, bool created),
++ TP_ARGS(sp, created),
++
++ TP_STRUCT__entry(
++ KVM_MMU_PAGE_FIELDS
++ __field(bool, created)
++ ),
++
++ TP_fast_assign(
++ KVM_MMU_PAGE_ASSIGN(sp)
++ tp_assign(created, created)
++ ),
++
++ TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
++ __entry->created ? "new" : "existing")
++)
++
++DECLARE_EVENT_CLASS(kvm_mmu_page_class,
++
++ TP_PROTO(struct kvm_mmu_page *sp),
++ TP_ARGS(sp),
++
++ TP_STRUCT__entry(
++ KVM_MMU_PAGE_FIELDS
++ ),
++
++ TP_fast_assign(
++ KVM_MMU_PAGE_ASSIGN(sp)
++ ),
++
++ TP_printk("%s", KVM_MMU_PAGE_PRINTK())
++)
++
++DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
++ TP_PROTO(struct kvm_mmu_page *sp),
++
++ TP_ARGS(sp)
++)
++
++DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
++ TP_PROTO(struct kvm_mmu_page *sp),
++
++ TP_ARGS(sp)
++)
++
++DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
++ TP_PROTO(struct kvm_mmu_page *sp),
++
++ TP_ARGS(sp)
++)
++
++TRACE_EVENT(
++ mark_mmio_spte,
++ TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
++ TP_ARGS(sptep, gfn, access),
++
++ TP_STRUCT__entry(
++ __field(void *, sptep)
++ __field(gfn_t, gfn)
++ __field(unsigned, access)
++ ),
++
++ TP_fast_assign(
++ tp_assign(sptep, sptep)
++ tp_assign(gfn, gfn)
++ tp_assign(access, access)
++ ),
++
++ TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
++ __entry->access)
++)
++
++TRACE_EVENT(
++ handle_mmio_page_fault,
++ TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
++ TP_ARGS(addr, gfn, access),
++
++ TP_STRUCT__entry(
++ __field(u64, addr)
++ __field(gfn_t, gfn)
++ __field(unsigned, access)
++ ),
++
++ TP_fast_assign(
++ tp_assign(addr, addr)
++ tp_assign(gfn, gfn)
++ tp_assign(access, access)
++ ),
++
++ TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
++ __entry->access)
++)
++
++#define __spte_satisfied(__spte) \
++ (__entry->retry && is_writable_pte(__entry->__spte))
++
++TRACE_EVENT(
++ fast_page_fault,
++ TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
++ u64 *sptep, u64 old_spte, bool retry),
++ TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
++
++ TP_STRUCT__entry(
++ __field(int, vcpu_id)
++ __field(gva_t, gva)
++ __field(u32, error_code)
++ __field(u64 *, sptep)
++ __field(u64, old_spte)
++ __field(u64, new_spte)
++ __field(bool, retry)
++ ),
++
++ TP_fast_assign(
++ tp_assign(vcpu_id, vcpu->vcpu_id)
++ tp_assign(gva, gva)
++ tp_assign(error_code, error_code)
++ tp_assign(sptep, sptep)
++ tp_assign(old_spte, old_spte)
++ tp_assign(new_spte, *sptep)
++ tp_assign(retry, retry)
++ ),
++
++ TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
++ " new %llx spurious %d fixed %d", __entry->vcpu_id,
++ __entry->gva, __print_flags(__entry->error_code, "|",
++ kvm_mmu_trace_pferr_flags), __entry->sptep,
++ __entry->old_spte, __entry->new_spte,
++ __spte_satisfied(old_spte), __spte_satisfied(new_spte)
++ )
++)
++#endif /* _TRACE_KVMMMU_H */
++
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE mmutrace
++
++/* This part must be outside protection */
++#include "../../../../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/arch/x86/kvm/trace.h
+@@ -0,0 +1,833 @@
++#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_KVM_H
++
++#include <linux/tracepoint.h>
++#include <asm/vmx.h>
++#include <asm/svm.h>
++#include <asm/clocksource.h>
++#include <linux/version.h>
++#include <../arch/x86/kvm/lapic.h>
++#include <../arch/x86/kvm/kvm_cache_regs.h>
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM kvm
++
++/*
++ * Tracepoint for guest mode entry.
++ */
++TRACE_EVENT(kvm_entry,
++ TP_PROTO(unsigned int vcpu_id),
++ TP_ARGS(vcpu_id),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, vcpu_id )
++ ),
++
++ TP_fast_assign(
++ tp_assign(vcpu_id, vcpu_id)
++ ),
++
++ TP_printk("vcpu %u", __entry->vcpu_id)
++)
++
++/*
++ * Tracepoint for hypercall.
++ */
++TRACE_EVENT(kvm_hypercall,
++ TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
++ unsigned long a2, unsigned long a3),
++ TP_ARGS(nr, a0, a1, a2, a3),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, nr )
++ __field( unsigned long, a0 )
++ __field( unsigned long, a1 )
++ __field( unsigned long, a2 )
++ __field( unsigned long, a3 )
++ ),
++
++ TP_fast_assign(
++ tp_assign(nr, nr)
++ tp_assign(a0, a0)
++ tp_assign(a1, a1)
++ tp_assign(a2, a2)
++ tp_assign(a3, a3)
++ ),
++
++ TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
++ __entry->nr, __entry->a0, __entry->a1, __entry->a2,
++ __entry->a3)
++)
++
++/*
++ * Tracepoint for hypercall.
++ */
++TRACE_EVENT(kvm_hv_hypercall,
++ TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
++ __u64 ingpa, __u64 outgpa),
++ TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
++
++ TP_STRUCT__entry(
++ __field( __u16, rep_cnt )
++ __field( __u16, rep_idx )
++ __field( __u64, ingpa )
++ __field( __u64, outgpa )
++ __field( __u16, code )
++ __field( bool, fast )
++ ),
++
++ TP_fast_assign(
++ tp_assign(rep_cnt, rep_cnt)
++ tp_assign(rep_idx, rep_idx)
++ tp_assign(ingpa, ingpa)
++ tp_assign(outgpa, outgpa)
++ tp_assign(code, code)
++ tp_assign(fast, fast)
++ ),
++
++ TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
++ __entry->code, __entry->fast ? "fast" : "slow",
++ __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
++ __entry->outgpa)
++)
++
++/*
++ * Tracepoint for PIO.
++ */
++TRACE_EVENT(kvm_pio,
++ TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
++ unsigned int count),
++ TP_ARGS(rw, port, size, count),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, rw )
++ __field( unsigned int, port )
++ __field( unsigned int, size )
++ __field( unsigned int, count )
++ ),
++
++ TP_fast_assign(
++ tp_assign(rw, rw)
++ tp_assign(port, port)
++ tp_assign(size, size)
++ tp_assign(count, count)
++ ),
++
++ TP_printk("pio_%s at 0x%x size %d count %d",
++ __entry->rw ? "write" : "read",
++ __entry->port, __entry->size, __entry->count)
++)
++
++/*
++ * Tracepoint for cpuid.
++ */
++TRACE_EVENT(kvm_cpuid,
++ TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
++ unsigned long rcx, unsigned long rdx),
++ TP_ARGS(function, rax, rbx, rcx, rdx),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, function )
++ __field( unsigned long, rax )
++ __field( unsigned long, rbx )
++ __field( unsigned long, rcx )
++ __field( unsigned long, rdx )
++ ),
++
++ TP_fast_assign(
++ tp_assign(function, function)
++ tp_assign(rax, rax)
++ tp_assign(rbx, rbx)
++ tp_assign(rcx, rcx)
++ tp_assign(rdx, rdx)
++ ),
++
++ TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
++ __entry->function, __entry->rax,
++ __entry->rbx, __entry->rcx, __entry->rdx)
++)
++
++#define AREG(x) { APIC_##x, "APIC_" #x }
++
++#define kvm_trace_symbol_apic \
++ AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \
++ AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \
++ AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
++ AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \
++ AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \
++ AREG(ECTRL)
++/*
++ * Tracepoint for apic access.
++ */
++TRACE_EVENT(kvm_apic,
++ TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
++ TP_ARGS(rw, reg, val),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, rw )
++ __field( unsigned int, reg )
++ __field( unsigned int, val )
++ ),
++
++ TP_fast_assign(
++ tp_assign(rw, rw)
++ tp_assign(reg, reg)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("apic_%s %s = 0x%x",
++ __entry->rw ? "write" : "read",
++ __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
++ __entry->val)
++)
++
++#define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val)
++#define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val)
++
++#define KVM_ISA_VMX 1
++#define KVM_ISA_SVM 2
++
++/*
++ * Tracepoint for kvm guest exit:
++ */
++TRACE_EVENT(kvm_exit,
++ TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa, u64 info1, u64 info2),
++ TP_ARGS(exit_reason, vcpu, isa, info1, info2),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, exit_reason )
++ __field( unsigned long, guest_rip )
++ __field( u32, isa )
++ __field( u64, info1 )
++ __field( u64, info2 )
++ ),
++
++ TP_fast_assign(
++ tp_assign(exit_reason, exit_reason)
++ tp_assign(guest_rip, kvm_rip_read(vcpu))
++ tp_assign(isa, isa)
++ kvm_x86_ops->get_exit_info(vcpu, &info1,
++ &info2);
++ tp_assign(info1, info1)
++ tp_assign(info2, info2)
++ ),
++
++ TP_printk("reason %s rip info %llx %llx",
++ (__entry->isa == KVM_ISA_VMX) ?
++ __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
++ __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
++ /* __entry->guest_rip,*/ __entry->info1, __entry->info2)
++)
++
++/*
++ * Tracepoint for kvm interrupt injection:
++ */
++TRACE_EVENT(kvm_inj_virq,
++ TP_PROTO(unsigned int irq),
++ TP_ARGS(irq),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, irq )
++ ),
++
++ TP_fast_assign(
++ tp_assign(irq, irq)
++ ),
++
++ TP_printk("irq %u", __entry->irq)
++)
++
++#define EXS(x) { x##_VECTOR, "#" #x }
++
++#define kvm_trace_sym_exc \
++ EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
++ EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
++ EXS(MF), EXS(MC)
++
++/*
++ * Tracepoint for kvm interrupt injection:
++ */
++TRACE_EVENT(kvm_inj_exception,
++ TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
++ TP_ARGS(exception, has_error, error_code),
++
++ TP_STRUCT__entry(
++ __field( u8, exception )
++ __field( u8, has_error )
++ __field( u32, error_code )
++ ),
++
++ TP_fast_assign(
++ tp_assign(exception, exception)
++ tp_assign(has_error, has_error)
++ tp_assign(error_code, error_code)
++ ),
++
++ TP_printk("%s (0x%x)",
++ __print_symbolic(__entry->exception, kvm_trace_sym_exc),
++ /* FIXME: don't print error_code if not present */
++ __entry->has_error ? __entry->error_code : 0)
++)
++
++/*
++ * Tracepoint for page fault.
++ */
++TRACE_EVENT(kvm_page_fault,
++ TP_PROTO(unsigned long fault_address, unsigned int error_code),
++ TP_ARGS(fault_address, error_code),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, fault_address )
++ __field( unsigned int, error_code )
++ ),
++
++ TP_fast_assign(
++ tp_assign(fault_address, fault_address)
++ tp_assign(error_code, error_code)
++ ),
++
++ TP_printk("address %lx error_code %x",
++ __entry->fault_address, __entry->error_code)
++)
++
++/*
++ * Tracepoint for guest MSR access.
++ */
++TRACE_EVENT(kvm_msr,
++ TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
++ TP_ARGS(write, ecx, data, exception),
++
++ TP_STRUCT__entry(
++ __field( unsigned, write )
++ __field( u32, ecx )
++ __field( u64, data )
++ __field( u8, exception )
++ ),
++
++ TP_fast_assign(
++ tp_assign(write, write)
++ tp_assign(ecx, ecx)
++ tp_assign(data, data)
++ tp_assign(exception, exception)
++ ),
++
++ TP_printk("msr_%s %x = 0x%llx%s",
++ __entry->write ? "write" : "read",
++ __entry->ecx, __entry->data,
++ __entry->exception ? " (#GP)" : "")
++)
++
++#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false)
++#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false)
++#define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true)
++#define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true)
++
++/*
++ * Tracepoint for guest CR access.
++ */
++TRACE_EVENT(kvm_cr,
++ TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
++ TP_ARGS(rw, cr, val),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, rw )
++ __field( unsigned int, cr )
++ __field( unsigned long, val )
++ ),
++
++ TP_fast_assign(
++ tp_assign(rw, rw)
++ tp_assign(cr, cr)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("cr_%s %x = 0x%lx",
++ __entry->rw ? "write" : "read",
++ __entry->cr, __entry->val)
++)
++
++#define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val)
++#define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val)
++
++TRACE_EVENT(kvm_pic_set_irq,
++ TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
++ TP_ARGS(chip, pin, elcr, imr, coalesced),
++
++ TP_STRUCT__entry(
++ __field( __u8, chip )
++ __field( __u8, pin )
++ __field( __u8, elcr )
++ __field( __u8, imr )
++ __field( bool, coalesced )
++ ),
++
++ TP_fast_assign(
++ tp_assign(chip, chip)
++ tp_assign(pin, pin)
++ tp_assign(elcr, elcr)
++ tp_assign(imr, imr)
++ tp_assign(coalesced, coalesced)
++ ),
++
++ TP_printk("chip %u pin %u (%s%s)%s",
++ __entry->chip, __entry->pin,
++ (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
++ (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
++ __entry->coalesced ? " (coalesced)" : "")
++)
++
++#define kvm_apic_dst_shorthand \
++ {0x0, "dst"}, \
++ {0x1, "self"}, \
++ {0x2, "all"}, \
++ {0x3, "all-but-self"}
++
++TRACE_EVENT(kvm_apic_ipi,
++ TP_PROTO(__u32 icr_low, __u32 dest_id),
++ TP_ARGS(icr_low, dest_id),
++
++ TP_STRUCT__entry(
++ __field( __u32, icr_low )
++ __field( __u32, dest_id )
++ ),
++
++ TP_fast_assign(
++ tp_assign(icr_low, icr_low)
++ tp_assign(dest_id, dest_id)
++ ),
++
++ TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
++ __entry->dest_id, (u8)__entry->icr_low,
++ __print_symbolic((__entry->icr_low >> 8 & 0x7),
++ kvm_deliver_mode),
++ (__entry->icr_low & (1<<11)) ? "logical" : "physical",
++ (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
++ (__entry->icr_low & (1<<15)) ? "level" : "edge",
++ __print_symbolic((__entry->icr_low >> 18 & 0x3),
++ kvm_apic_dst_shorthand))
++)
++
++TRACE_EVENT(kvm_apic_accept_irq,
++ TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
++ TP_ARGS(apicid, dm, tm, vec, coalesced),
++
++ TP_STRUCT__entry(
++ __field( __u32, apicid )
++ __field( __u16, dm )
++ __field( __u8, tm )
++ __field( __u8, vec )
++ __field( bool, coalesced )
++ ),
++
++ TP_fast_assign(
++ tp_assign(apicid, apicid)
++ tp_assign(dm, dm)
++ tp_assign(tm, tm)
++ tp_assign(vec, vec)
++ tp_assign(coalesced, coalesced)
++ ),
++
++ TP_printk("apicid %x vec %u (%s|%s)%s",
++ __entry->apicid, __entry->vec,
++ __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
++ __entry->tm ? "level" : "edge",
++ __entry->coalesced ? " (coalesced)" : "")
++)
++
++TRACE_EVENT(kvm_eoi,
++ TP_PROTO(struct kvm_lapic *apic, int vector),
++ TP_ARGS(apic, vector),
++
++ TP_STRUCT__entry(
++ __field( __u32, apicid )
++ __field( int, vector )
++ ),
++
++ TP_fast_assign(
++ tp_assign(apicid, apic->vcpu->vcpu_id)
++ tp_assign(vector, vector)
++ ),
++
++ TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
++)
++
++TRACE_EVENT(kvm_pv_eoi,
++ TP_PROTO(struct kvm_lapic *apic, int vector),
++ TP_ARGS(apic, vector),
++
++ TP_STRUCT__entry(
++ __field( __u32, apicid )
++ __field( int, vector )
++ ),
++
++ TP_fast_assign(
++ tp_assign(apicid, apic->vcpu->vcpu_id)
++ tp_assign(vector, vector)
++ ),
++
++ TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
++)
++
++/*
++ * Tracepoint for nested VMRUN
++ */
++TRACE_EVENT(kvm_nested_vmrun,
++ TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
++ __u32 event_inj, bool npt),
++ TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ __field( __u64, vmcb )
++ __field( __u64, nested_rip )
++ __field( __u32, int_ctl )
++ __field( __u32, event_inj )
++ __field( bool, npt )
++ ),
++
++ TP_fast_assign(
++ tp_assign(rip, rip)
++ tp_assign(vmcb, vmcb)
++ tp_assign(nested_rip, nested_rip)
++ tp_assign(int_ctl, int_ctl)
++ tp_assign(event_inj, event_inj)
++ tp_assign(npt, npt)
++ ),
++
++ TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
++ "event_inj: 0x%08x npt: %s",
++ __entry->rip, __entry->vmcb, __entry->nested_rip,
++ __entry->int_ctl, __entry->event_inj,
++ __entry->npt ? "on" : "off")
++)
++
++TRACE_EVENT(kvm_nested_intercepts,
++ TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
++ TP_ARGS(cr_read, cr_write, exceptions, intercept),
++
++ TP_STRUCT__entry(
++ __field( __u16, cr_read )
++ __field( __u16, cr_write )
++ __field( __u32, exceptions )
++ __field( __u64, intercept )
++ ),
++
++ TP_fast_assign(
++ tp_assign(cr_read, cr_read)
++ tp_assign(cr_write, cr_write)
++ tp_assign(exceptions, exceptions)
++ tp_assign(intercept, intercept)
++ ),
++
++ TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
++ __entry->cr_read, __entry->cr_write, __entry->exceptions,
++ __entry->intercept)
++)
++/*
++ * Tracepoint for #VMEXIT while nested
++ */
++TRACE_EVENT(kvm_nested_vmexit,
++ TP_PROTO(__u64 rip, __u32 exit_code,
++ __u64 exit_info1, __u64 exit_info2,
++ __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
++ TP_ARGS(rip, exit_code, exit_info1, exit_info2,
++ exit_int_info, exit_int_info_err, isa),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ __field( __u32, exit_code )
++ __field( __u64, exit_info1 )
++ __field( __u64, exit_info2 )
++ __field( __u32, exit_int_info )
++ __field( __u32, exit_int_info_err )
++ __field( __u32, isa )
++ ),
++
++ TP_fast_assign(
++ tp_assign(rip, rip)
++ tp_assign(exit_code, exit_code)
++ tp_assign(exit_info1, exit_info1)
++ tp_assign(exit_info2, exit_info2)
++ tp_assign(exit_int_info, exit_int_info)
++ tp_assign(exit_int_info_err, exit_int_info_err)
++ tp_assign(isa, isa)
++ ),
++ TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
++ "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
++ __entry->rip,
++ (__entry->isa == KVM_ISA_VMX) ?
++ __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
++ __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
++ __entry->exit_info1, __entry->exit_info2,
++ __entry->exit_int_info, __entry->exit_int_info_err)
++)
++
++/*
++ * Tracepoint for #VMEXIT reinjected to the guest
++ */
++TRACE_EVENT(kvm_nested_vmexit_inject,
++ TP_PROTO(__u32 exit_code,
++ __u64 exit_info1, __u64 exit_info2,
++ __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
++ TP_ARGS(exit_code, exit_info1, exit_info2,
++ exit_int_info, exit_int_info_err, isa),
++
++ TP_STRUCT__entry(
++ __field( __u32, exit_code )
++ __field( __u64, exit_info1 )
++ __field( __u64, exit_info2 )
++ __field( __u32, exit_int_info )
++ __field( __u32, exit_int_info_err )
++ __field( __u32, isa )
++ ),
++
++ TP_fast_assign(
++ tp_assign(exit_code, exit_code)
++ tp_assign(exit_info1, exit_info1)
++ tp_assign(exit_info2, exit_info2)
++ tp_assign(exit_int_info, exit_int_info)
++ tp_assign(exit_int_info_err, exit_int_info_err)
++ tp_assign(isa, isa)
++ ),
++
++ TP_printk("reason: %s ext_inf1: 0x%016llx "
++ "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
++ (__entry->isa == KVM_ISA_VMX) ?
++ __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
++ __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
++ __entry->exit_info1, __entry->exit_info2,
++ __entry->exit_int_info, __entry->exit_int_info_err)
++)
++
++/*
++ * Tracepoint for nested #vmexit because of interrupt pending
++ */
++TRACE_EVENT(kvm_nested_intr_vmexit,
++ TP_PROTO(__u64 rip),
++ TP_ARGS(rip),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ ),
++
++ TP_fast_assign(
++ tp_assign(rip, rip)
++ ),
++
++ TP_printk("rip: 0x%016llx", __entry->rip)
++)
++
++/*
++ * Tracepoint for nested #vmexit because of interrupt pending
++ */
++TRACE_EVENT(kvm_invlpga,
++ TP_PROTO(__u64 rip, int asid, u64 address),
++ TP_ARGS(rip, asid, address),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ __field( int, asid )
++ __field( __u64, address )
++ ),
++
++ TP_fast_assign(
++ tp_assign(rip, rip)
++ tp_assign(asid, asid)
++ tp_assign(address, address)
++ ),
++
++ TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
++ __entry->rip, __entry->asid, __entry->address)
++)
++
++/*
++ * Tracepoint for nested #vmexit because of interrupt pending
++ */
++TRACE_EVENT(kvm_skinit,
++ TP_PROTO(__u64 rip, __u32 slb),
++ TP_ARGS(rip, slb),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ __field( __u32, slb )
++ ),
++
++ TP_fast_assign(
++ tp_assign(rip, rip)
++ tp_assign(slb, slb)
++ ),
++
++ TP_printk("rip: 0x%016llx slb: 0x%08x",
++ __entry->rip, __entry->slb)
++)
++
++#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
++#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
++#define KVM_EMUL_INSN_F_CS_D (1 << 2)
++#define KVM_EMUL_INSN_F_CS_L (1 << 3)
++
++#define kvm_trace_symbol_emul_flags \
++ { 0, "real" }, \
++ { KVM_EMUL_INSN_F_CR0_PE \
++ | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \
++ { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \
++ { KVM_EMUL_INSN_F_CR0_PE \
++ | KVM_EMUL_INSN_F_CS_D, "prot32" }, \
++ { KVM_EMUL_INSN_F_CR0_PE \
++ | KVM_EMUL_INSN_F_CS_L, "prot64" }
++
++#define kei_decode_mode(mode) ({ \
++ u8 flags = 0xff; \
++ switch (mode) { \
++ case X86EMUL_MODE_REAL: \
++ flags = 0; \
++ break; \
++ case X86EMUL_MODE_VM86: \
++ flags = KVM_EMUL_INSN_F_EFL_VM; \
++ break; \
++ case X86EMUL_MODE_PROT16: \
++ flags = KVM_EMUL_INSN_F_CR0_PE; \
++ break; \
++ case X86EMUL_MODE_PROT32: \
++ flags = KVM_EMUL_INSN_F_CR0_PE \
++ | KVM_EMUL_INSN_F_CS_D; \
++ break; \
++ case X86EMUL_MODE_PROT64: \
++ flags = KVM_EMUL_INSN_F_CR0_PE \
++ | KVM_EMUL_INSN_F_CS_L; \
++ break; \
++ } \
++ flags; \
++ })
++
++TRACE_EVENT(kvm_emulate_insn,
++ TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
++ TP_ARGS(vcpu, failed),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ __field( __u32, csbase )
++ __field( __u8, len )
++ __array( __u8, insn, 15 )
++ __field( __u8, flags )
++ __field( __u8, failed )
++ ),
++
++ TP_fast_assign(
++ tp_assign(rip, vcpu->arch.emulate_ctxt.fetch.start)
++ tp_assign(csbase, kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS))
++ tp_assign(len, vcpu->arch.emulate_ctxt._eip
++ - vcpu->arch.emulate_ctxt.fetch.start)
++ tp_memcpy(insn,
++ vcpu->arch.emulate_ctxt.fetch.data,
++ 15)
++ tp_assign(flags, kei_decode_mode(vcpu->arch.emulate_ctxt.mode))
++ tp_assign(failed, failed)
++ ),
++
++ TP_printk("%x:%llx:%s (%s)%s",
++ __entry->csbase, __entry->rip,
++ __print_hex(__entry->insn, __entry->len),
++ __print_symbolic(__entry->flags,
++ kvm_trace_symbol_emul_flags),
++ __entry->failed ? " failed" : ""
++ )
++ )
++
++#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
++#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
++
++TRACE_EVENT(
++ vcpu_match_mmio,
++ TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
++ TP_ARGS(gva, gpa, write, gpa_match),
++
++ TP_STRUCT__entry(
++ __field(gva_t, gva)
++ __field(gpa_t, gpa)
++ __field(bool, write)
++ __field(bool, gpa_match)
++ ),
++
++ TP_fast_assign(
++ tp_assign(gva, gva)
++ tp_assign(gpa, gpa)
++ tp_assign(write, write)
++ tp_assign(gpa_match, gpa_match)
++ ),
++
++ TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
++ __entry->write ? "Write" : "Read",
++ __entry->gpa_match ? "GPA" : "GVA")
++)
++
++#ifdef CONFIG_X86_64
++
++#define host_clocks \
++ {VCLOCK_NONE, "none"}, \
++ {VCLOCK_TSC, "tsc"}, \
++ {VCLOCK_HPET, "hpet"} \
++
++TRACE_EVENT(kvm_update_master_clock,
++ TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
++ TP_ARGS(use_master_clock, host_clock, offset_matched),
++
++ TP_STRUCT__entry(
++ __field( bool, use_master_clock )
++ __field( unsigned int, host_clock )
++ __field( bool, offset_matched )
++ ),
++
++ TP_fast_assign(
++ tp_assign(use_master_clock, use_master_clock)
++ tp_assign(host_clock, host_clock)
++ tp_assign(offset_matched, offset_matched)
++ ),
++
++ TP_printk("masterclock %d hostclock %s offsetmatched %u",
++ __entry->use_master_clock,
++ __print_symbolic(__entry->host_clock, host_clocks),
++ __entry->offset_matched)
++)
++
++TRACE_EVENT(kvm_track_tsc,
++ TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
++ unsigned int online_vcpus, bool use_master_clock,
++ unsigned int host_clock),
++ TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
++ host_clock),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, vcpu_id )
++ __field( unsigned int, nr_vcpus_matched_tsc )
++ __field( unsigned int, online_vcpus )
++ __field( bool, use_master_clock )
++ __field( unsigned int, host_clock )
++ ),
++
++ TP_fast_assign(
++ tp_assign(vcpu_id, vcpu_id)
++ tp_assign(nr_vcpus_matched_tsc, nr_matched)
++ tp_assign(online_vcpus, online_vcpus)
++ tp_assign(use_master_clock, use_master_clock)
++ tp_assign(host_clock, host_clock)
++ ),
++
++ TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
++ " hostclock %s",
++ __entry->vcpu_id, __entry->use_master_clock,
++ __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
++ __print_symbolic(__entry->host_clock, host_clocks))
++)
++
++#endif /* CONFIG_X86_64 */
++
++#endif /* _TRACE_KVM_H */
++
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE trace
++
++/* This part must be outside protection */
++#include "../../../../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/asoc.h
+@@ -0,0 +1,422 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM asoc
++
++#if !defined(_TRACE_ASOC_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_ASOC_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++#define DAPM_DIRECT "(direct)"
++
++#ifndef _TRACE_ASOC_DEF
++#define _TRACE_ASOC_DEF
++struct snd_soc_jack;
++struct snd_soc_codec;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
++struct snd_soc_platform;
++#endif
++struct snd_soc_card;
++struct snd_soc_dapm_widget;
++#endif
++
++/*
++ * Log register events
++ */
++DECLARE_EVENT_CLASS(snd_soc_reg,
++
++ TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(codec, reg, val),
++
++ TP_STRUCT__entry(
++ __string( name, codec->name )
++ __field( int, id )
++ __field( unsigned int, reg )
++ __field( unsigned int, val )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, codec->name)
++ tp_assign(id, codec->id)
++ tp_assign(reg, reg)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("codec=%s.%d reg=%x val=%x", __get_str(name),
++ (int)__entry->id, (unsigned int)__entry->reg,
++ (unsigned int)__entry->val)
++)
++
++DEFINE_EVENT(snd_soc_reg, snd_soc_reg_write,
++
++ TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(codec, reg, val)
++
++)
++
++DEFINE_EVENT(snd_soc_reg, snd_soc_reg_read,
++
++ TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(codec, reg, val)
++
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
++DECLARE_EVENT_CLASS(snd_soc_preg,
++
++ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(platform, reg, val),
++
++ TP_STRUCT__entry(
++ __string( name, platform->name )
++ __field( int, id )
++ __field( unsigned int, reg )
++ __field( unsigned int, val )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, platform->name)
++ tp_assign(id, platform->id)
++ tp_assign(reg, reg)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("platform=%s.%d reg=%x val=%x", __get_str(name),
++ (int)__entry->id, (unsigned int)__entry->reg,
++ (unsigned int)__entry->val)
++)
++
++DEFINE_EVENT(snd_soc_preg, snd_soc_preg_write,
++
++ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(platform, reg, val)
++
++)
++
++DEFINE_EVENT(snd_soc_preg, snd_soc_preg_read,
++
++ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(platform, reg, val)
++
++)
++#endif
++
++DECLARE_EVENT_CLASS(snd_soc_card,
++
++ TP_PROTO(struct snd_soc_card *card, int val),
++
++ TP_ARGS(card, val),
++
++ TP_STRUCT__entry(
++ __string( name, card->name )
++ __field( int, val )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, card->name)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val)
++)
++
++DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start,
++
++ TP_PROTO(struct snd_soc_card *card, int val),
++
++ TP_ARGS(card, val)
++
++)
++
++DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done,
++
++ TP_PROTO(struct snd_soc_card *card, int val),
++
++ TP_ARGS(card, val)
++
++)
++
++DECLARE_EVENT_CLASS(snd_soc_dapm_basic,
++
++ TP_PROTO(struct snd_soc_card *card),
++
++ TP_ARGS(card),
++
++ TP_STRUCT__entry(
++ __string( name, card->name )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, card->name)
++ ),
++
++ TP_printk("card=%s", __get_str(name))
++)
++
++DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start,
++
++ TP_PROTO(struct snd_soc_card *card),
++
++ TP_ARGS(card)
++
++)
++
++DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done,
++
++ TP_PROTO(struct snd_soc_card *card),
++
++ TP_ARGS(card)
++
++)
++
++DECLARE_EVENT_CLASS(snd_soc_dapm_widget,
++
++ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
++
++ TP_ARGS(w, val),
++
++ TP_STRUCT__entry(
++ __string( name, w->name )
++ __field( int, val )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, w->name)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("widget=%s val=%d", __get_str(name),
++ (int)__entry->val)
++)
++
++DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_power,
++
++ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
++
++ TP_ARGS(w, val)
++
++)
++
++DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_start,
++
++ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
++
++ TP_ARGS(w, val)
++
++)
++
++DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done,
++
++ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
++
++ TP_ARGS(w, val)
++
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++TRACE_EVENT(snd_soc_dapm_walk_done,
++
++ TP_PROTO(struct snd_soc_card *card),
++
++ TP_ARGS(card),
++
++ TP_STRUCT__entry(
++ __string( name, card->name )
++ __field( int, power_checks )
++ __field( int, path_checks )
++ __field( int, neighbour_checks )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, card->name)
++ tp_assign(power_checks, card->dapm_stats.power_checks)
++ tp_assign(path_checks, card->dapm_stats.path_checks)
++ tp_assign(neighbour_checks, card->dapm_stats.neighbour_checks)
++ ),
++
++ TP_printk("%s: checks %d power, %d path, %d neighbour",
++ __get_str(name), (int)__entry->power_checks,
++ (int)__entry->path_checks, (int)__entry->neighbour_checks)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
++TRACE_EVENT(snd_soc_dapm_output_path,
++
++ TP_PROTO(struct snd_soc_dapm_widget *widget,
++ struct snd_soc_dapm_path *path),
++
++ TP_ARGS(widget, path),
++
++ TP_STRUCT__entry(
++ __string( wname, widget->name )
++ __string( pname, path->name ? path->name : DAPM_DIRECT)
++ __string( psname, path->sink->name )
++ __field( int, path_sink )
++ __field( int, path_connect )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(wname, widget->name)
++ tp_strcpy(pname, path->name ? path->name : DAPM_DIRECT)
++ tp_strcpy(psname, path->sink->name)
++ tp_assign(path_connect, path->connect)
++ tp_assign(path_sink, (long)path->sink)
++ ),
++
++ TP_printk("%c%s -> %s -> %s\n",
++ (int) __entry->path_sink &&
++ (int) __entry->path_connect ? '*' : ' ',
++ __get_str(wname), __get_str(pname), __get_str(psname))
++)
++
++TRACE_EVENT(snd_soc_dapm_input_path,
++
++ TP_PROTO(struct snd_soc_dapm_widget *widget,
++ struct snd_soc_dapm_path *path),
++
++ TP_ARGS(widget, path),
++
++ TP_STRUCT__entry(
++ __string( wname, widget->name )
++ __string( pname, path->name ? path->name : DAPM_DIRECT)
++ __string( psname, path->source->name )
++ __field( int, path_source )
++ __field( int, path_connect )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(wname, widget->name)
++ tp_strcpy(pname, path->name ? path->name : DAPM_DIRECT)
++ tp_strcpy(psname, path->source->name)
++ tp_assign(path_connect, path->connect)
++ tp_assign(path_source, (long)path->source)
++ ),
++
++ TP_printk("%c%s <- %s <- %s\n",
++ (int) __entry->path_source &&
++ (int) __entry->path_connect ? '*' : ' ',
++ __get_str(wname), __get_str(pname), __get_str(psname))
++)
++
++TRACE_EVENT(snd_soc_dapm_connected,
++
++ TP_PROTO(int paths, int stream),
++
++ TP_ARGS(paths, stream),
++
++ TP_STRUCT__entry(
++ __field( int, paths )
++ __field( int, stream )
++ ),
++
++ TP_fast_assign(
++ tp_assign(paths, paths)
++ tp_assign(stream, stream)
++ ),
++
++ TP_printk("%s: found %d paths\n",
++ __entry->stream ? "capture" : "playback", __entry->paths)
++)
++#endif
++
++TRACE_EVENT(snd_soc_jack_irq,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, name)
++ ),
++
++ TP_printk("%s", __get_str(name))
++)
++
++TRACE_EVENT(snd_soc_jack_report,
++
++ TP_PROTO(struct snd_soc_jack *jack, int mask, int val),
++
++ TP_ARGS(jack, mask, val),
++
++ TP_STRUCT__entry(
++ __string( name, jack->jack->name )
++ __field( int, mask )
++ __field( int, val )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, jack->jack->name)
++ tp_assign(mask, mask)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("jack=%s %x/%x", __get_str(name), (int)__entry->val,
++ (int)__entry->mask)
++)
++
++TRACE_EVENT(snd_soc_jack_notify,
++
++ TP_PROTO(struct snd_soc_jack *jack, int val),
++
++ TP_ARGS(jack, val),
++
++ TP_STRUCT__entry(
++ __string( name, jack->jack->name )
++ __field( int, val )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, jack->jack->name)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
++)
++
++TRACE_EVENT(snd_soc_cache_sync,
++
++ TP_PROTO(struct snd_soc_codec *codec, const char *type,
++ const char *status),
++
++ TP_ARGS(codec, type, status),
++
++ TP_STRUCT__entry(
++ __string( name, codec->name )
++ __string( status, status )
++ __string( type, type )
++ __field( int, id )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, codec->name)
++ tp_strcpy(status, status)
++ tp_strcpy(type, type)
++ tp_assign(id, codec->id)
++ ),
++
++ TP_printk("codec=%s.%d type=%s status=%s", __get_str(name),
++ (int)__entry->id, __get_str(type), __get_str(status))
++)
++
++#endif /* _TRACE_ASOC_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
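Aside (illustrative, not part of the patch): every header added in this series follows the same shape. DECLARE_EVENT_CLASS records the field layout, the tp_assign/tp_strcpy fast-assign statements, and the TP_printk format once, and DEFINE_EVENT then instantiates named events that reuse that template with matching TP_PROTO/TP_ARGS. A minimal sketch of that pattern, using a hypothetical "foo" subsystem rather than anything defined in the patch:

    /* Sketch only: "foo_template" and "foo_event_start" are hypothetical. */
    DECLARE_EVENT_CLASS(foo_template,

        TP_PROTO(int val),

        TP_ARGS(val),

        TP_STRUCT__entry(
            __field( int, val )
        ),

        TP_fast_assign(
            tp_assign(val, val)   /* LTTng variant of the in-kernel __entry->val = val */
        ),

        TP_printk("val=%d", (int)__entry->val)
    )

    DEFINE_EVENT(foo_template, foo_event_start,

        TP_PROTO(int val),

        TP_ARGS(val)
    )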
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/block.h
+@@ -0,0 +1,878 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM block
++
++#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_BLOCK_H
++
++#include <linux/blktrace_api.h>
++#include <linux/blkdev.h>
++#include <linux/tracepoint.h>
++#include <linux/trace_seq.h>
++#include <linux/version.h>
++
++#define RWBS_LEN 8
++
++#ifndef _TRACE_BLOCK_DEF_
++#define _TRACE_BLOCK_DEF_
++
++#define __blk_dump_cmd(cmd, len) "<unknown>"
++
++enum {
++ RWBS_FLAG_WRITE = (1 << 0),
++ RWBS_FLAG_DISCARD = (1 << 1),
++ RWBS_FLAG_READ = (1 << 2),
++ RWBS_FLAG_RAHEAD = (1 << 3),
++ RWBS_FLAG_BARRIER = (1 << 4),
++ RWBS_FLAG_SYNC = (1 << 5),
++ RWBS_FLAG_META = (1 << 6),
++ RWBS_FLAG_SECURE = (1 << 7),
++ RWBS_FLAG_FLUSH = (1 << 8),
++ RWBS_FLAG_FUA = (1 << 9),
++};
++
++#endif /* _TRACE_BLOCK_DEF_ */
++
++#define __print_rwbs_flags(rwbs) \
++ __print_flags(rwbs, "", \
++ { RWBS_FLAG_FLUSH, "F" }, \
++ { RWBS_FLAG_WRITE, "W" }, \
++ { RWBS_FLAG_DISCARD, "D" }, \
++ { RWBS_FLAG_READ, "R" }, \
++ { RWBS_FLAG_FUA, "F" }, \
++ { RWBS_FLAG_RAHEAD, "A" }, \
++ { RWBS_FLAG_BARRIER, "B" }, \
++ { RWBS_FLAG_SYNC, "S" }, \
++ { RWBS_FLAG_META, "M" }, \
++ { RWBS_FLAG_SECURE, "E" })
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
++
++#define blk_fill_rwbs(rwbs, rw, bytes) \
++ tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
++ ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
++ ( (bytes) ? RWBS_FLAG_READ : \
++ ( 0 )))) \
++ | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
++ | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
++ | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
++ | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0) \
++ | ((rw) & REQ_FLUSH ? RWBS_FLAG_FLUSH : 0) \
++ | ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))
++
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++
++#define blk_fill_rwbs(rwbs, rw, bytes) \
++ tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
++ ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
++ ( (bytes) ? RWBS_FLAG_READ : \
++ ( 0 )))) \
++ | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
++ | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
++ | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
++ | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))
++
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++
++#define blk_fill_rwbs(rwbs, rw, bytes) \
++ tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
++ ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
++ ( (bytes) ? RWBS_FLAG_READ : \
++ ( 0 )))) \
++ | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
++ | ((rw) & REQ_HARDBARRIER ? RWBS_FLAG_BARRIER : 0) \
++ | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
++ | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
++ | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))
++
++#else
++
++#define blk_fill_rwbs(rwbs, rw, bytes) \
++ tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
++ ( (rw) & (1 << BIO_RW_DISCARD) ? RWBS_FLAG_DISCARD : \
++ ( (bytes) ? RWBS_FLAG_READ : \
++ ( 0 )))) \
++ | ((rw) & (1 << BIO_RW_AHEAD) ? RWBS_FLAG_RAHEAD : 0) \
++ | ((rw) & (1 << BIO_RW_SYNCIO) ? RWBS_FLAG_SYNC : 0) \
++ | ((rw) & (1 << BIO_RW_META) ? RWBS_FLAG_META : 0) \
++ | ((rw) & (1 << BIO_RW_BARRIER) ? RWBS_FLAG_BARRIER : 0))
++
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++DECLARE_EVENT_CLASS(block_buffer,
++
++ TP_PROTO(struct buffer_head *bh),
++
++ TP_ARGS(bh),
++
++ TP_STRUCT__entry (
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( size_t, size )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, bh->b_bdev->bd_dev)
++ tp_assign(sector, bh->b_blocknr)
++ tp_assign(size, bh->b_size)
++ ),
++
++ TP_printk("%d,%d sector=%llu size=%zu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long long)__entry->sector, __entry->size
++ )
++)
++
++/**
++ * block_touch_buffer - mark a buffer accessed
++ * @bh: buffer_head being touched
++ *
++ * Called from touch_buffer().
++ */
++DEFINE_EVENT(block_buffer, block_touch_buffer,
++
++ TP_PROTO(struct buffer_head *bh),
++
++ TP_ARGS(bh)
++)
++
++/**
++ * block_dirty_buffer - mark a buffer dirty
++ * @bh: buffer_head being dirtied
++ *
++ * Called from mark_buffer_dirty().
++ */
++DEFINE_EVENT(block_buffer, block_dirty_buffer,
++
++ TP_PROTO(struct buffer_head *bh),
++
++ TP_ARGS(bh)
++)
++#endif
++
++DECLARE_EVENT_CLASS(block_rq_with_error,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( int, errors )
++ __field( unsigned int, rwbs )
++ __dynamic_array_hex( unsigned char, cmd,
++ (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ rq->cmd_len : 0)
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
++ tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ 0 : blk_rq_pos(rq))
++ tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ 0 : blk_rq_sectors(rq))
++ tp_assign(errors, rq->errors)
++ blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
++ tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ rq->cmd : NULL)
++ ),
++
++ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __print_rwbs_flags(__entry->rwbs),
++ __blk_dump_cmd(__get_dynamic_array(cmd),
++ __get_dynamic_array_len(cmd)),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->errors)
++)
++
++/**
++ * block_rq_abort - abort block operation request
++ * @q: queue containing the block operation request
++ * @rq: block IO operation request
++ *
++ * Called immediately after pending block IO operation request @rq in
++ * queue @q is aborted. The fields in the operation request @rq
++ * can be examined to determine which device and sectors the pending
++ * operation would access.
++ */
++DEFINE_EVENT(block_rq_with_error, block_rq_abort,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq)
++)
++
++/**
++ * block_rq_requeue - place block IO request back on a queue
++ * @q: queue holding operation
++ * @rq: block IO operation request
++ *
++ * The block operation request @rq is being placed back into queue
++ * @q. For some reason the request was not completed and needs to be
++ * put back in the queue.
++ */
++DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq)
++)
++
++/**
++ * block_rq_complete - block IO operation completed by device driver
++ * @q: queue containing the block operation request
++ * @rq: block operations request
++ *
++ * The block_rq_complete tracepoint event indicates that some portion
++ * of operation request has been completed by the device driver. If
++ * the @rq->bio is %NULL, then there is absolutely no additional work to
++ * do for the request. If @rq->bio is non-NULL then there is
++ * additional work required to complete the request.
++ */
++DEFINE_EVENT(block_rq_with_error, block_rq_complete,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq)
++)
++
++DECLARE_EVENT_CLASS(block_rq,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( unsigned int, bytes )
++ __field( unsigned int, rwbs )
++ __array_text( char, comm, TASK_COMM_LEN )
++ __dynamic_array_hex( unsigned char, cmd,
++ (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ rq->cmd_len : 0)
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
++ tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ 0 : blk_rq_pos(rq))
++ tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ 0 : blk_rq_sectors(rq))
++ tp_assign(bytes, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ blk_rq_bytes(rq) : 0)
++ blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
++ tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ rq->cmd : NULL)
++ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
++ ),
++
++ TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __print_rwbs_flags(__entry->rwbs),
++ __entry->bytes,
++ __blk_dump_cmd(__get_dynamic_array(cmd),
++ __get_dynamic_array_len(cmd)),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->comm)
++)
++
++/**
++ * block_rq_insert - insert block operation request into queue
++ * @q: target queue
++ * @rq: block IO operation request
++ *
++ * Called immediately before block operation request @rq is inserted
++ * into queue @q. The fields in the operation request @rq struct can
++ * be examined to determine which device and sectors the pending
++ * operation would access.
++ */
++DEFINE_EVENT(block_rq, block_rq_insert,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq)
++)
++
++/**
++ * block_rq_issue - issue pending block IO request operation to device driver
++ * @q: queue holding operation
++ * @rq: block IO operation request
++ *
++ * Called when block operation request @rq from queue @q is sent to a
++ * device driver for processing.
++ */
++DEFINE_EVENT(block_rq, block_rq_issue,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq)
++)
++
++/**
++ * block_bio_bounce - used bounce buffer when processing block operation
++ * @q: queue holding the block operation
++ * @bio: block operation
++ *
++ * A bounce buffer was used to handle the block operation @bio in @q.
++ * This occurs when hardware limitations prevent a direct transfer of
++ * data between the @bio data memory area and the IO device. Use of a
++ * bounce buffer requires extra copying of data and decreases
++ * performance.
++ */
++TRACE_EVENT(block_bio_bounce,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( unsigned int, rwbs )
++ __array_text( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, bio->bi_bdev ?
++ bio->bi_bdev->bd_dev : 0)
++ tp_assign(sector, bio->bi_sector)
++ tp_assign(nr_sector, bio->bi_size >> 9)
++ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
++ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
++ ),
++
++ TP_printk("%d,%d %s %llu + %u [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __print_rwbs_flags(__entry->rwbs),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->comm)
++)
++
++/**
++ * block_bio_complete - completed all work on the block operation
++ * @q: queue holding the block operation
++ * @bio: block operation completed
++ * @error: io error value
++ *
++ * This tracepoint indicates there is no further work to do on this
++ * block IO operation @bio.
++ */
++TRACE_EVENT(block_bio_complete,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
++ TP_PROTO(struct request_queue *q, struct bio *bio, int error),
++
++ TP_ARGS(q, bio, error),
++#else
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio),
++#endif
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned, nr_sector )
++ __field( int, error )
++ __field( unsigned int, rwbs )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, bio->bi_bdev->bd_dev)
++ tp_assign(sector, bio->bi_sector)
++ tp_assign(nr_sector, bio->bi_size >> 9)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
++ tp_assign(error, error)
++#else
++ tp_assign(error, 0)
++#endif
++ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
++ ),
++
++ TP_printk("%d,%d %s %llu + %u [%d]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __print_rwbs_flags(__entry->rwbs),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->error)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++DECLARE_EVENT_CLASS(block_bio_merge,
++
++ TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
++
++ TP_ARGS(q, rq, bio),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( unsigned int, rwbs )
++ __array_text( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, bio->bi_bdev->bd_dev)
++ tp_assign(sector, bio->bi_sector)
++ tp_assign(nr_sector, bio->bi_size >> 9)
++ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
++ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
++ ),
++
++ TP_printk("%d,%d %s %llu + %u [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __print_rwbs_flags(__entry->rwbs),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->comm)
++)
++
++/**
++ * block_bio_backmerge - merging block operation to the end of an existing operation
++ * @q: queue holding operation
++ * @bio: new block operation to merge
++ *
++ * Merging block request @bio to the end of an existing block request
++ * in queue @q.
++ */
++DEFINE_EVENT(block_bio_merge, block_bio_backmerge,
++
++ TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
++
++ TP_ARGS(q, rq, bio)
++)
++
++/**
++ * block_bio_frontmerge - merging block operation to the beginning of an existing operation
++ * @q: queue holding operation
++ * @bio: new block operation to merge
++ *
++ * Merging block IO operation @bio to the beginning of an existing block
++ * operation in queue @q.
++ */
++DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,
++
++ TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
++
++ TP_ARGS(q, rq, bio)
++)
++
++/**
++ * block_bio_queue - putting new block IO operation in queue
++ * @q: queue holding operation
++ * @bio: new block operation
++ *
++ * About to place the block IO operation @bio into queue @q.
++ */
++TRACE_EVENT(block_bio_queue,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __array( char, rwbs, RWBS_LEN )
++ __array( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, bio->bi_bdev->bd_dev)
++ tp_assign(sector, bio->bi_sector)
++ tp_assign(nr_sector, bio->bi_size >> 9)
++ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
++ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
++ ),
++
++ TP_printk("%d,%d %s %llu + %u [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->comm)
++)
++#else
++DECLARE_EVENT_CLASS(block_bio,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( unsigned int, rwbs )
++ __array_text( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, bio->bi_bdev ? bio->bi_bdev->bd_dev : 0)
++ tp_assign(sector, bio->bi_sector)
++ tp_assign(nr_sector, bio->bi_size >> 9)
++ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
++ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
++ ),
++
++ TP_printk("%d,%d %s %llu + %u [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __print_rwbs_flags(__entry->rwbs),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->comm)
++)
++
++/**
++ * block_bio_backmerge - merging block operation to the end of an existing operation
++ * @q: queue holding operation
++ * @bio: new block operation to merge
++ *
++ * Merging block request @bio to the end of an existing block request
++ * in queue @q.
++ */
++DEFINE_EVENT(block_bio, block_bio_backmerge,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio)
++)
++
++/**
++ * block_bio_frontmerge - merging block operation to the beginning of an existing operation
++ * @q: queue holding operation
++ * @bio: new block operation to merge
++ *
++ * Merging block IO operation @bio to the beginning of an existing block
++ * operation in queue @q.
++ */
++DEFINE_EVENT(block_bio, block_bio_frontmerge,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio)
++)
++
++/**
++ * block_bio_queue - putting new block IO operation in queue
++ * @q: queue holding operation
++ * @bio: new block operation
++ *
++ * About to place the block IO operation @bio into queue @q.
++ */
++DEFINE_EVENT(block_bio, block_bio_queue,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio)
++)
++#endif
++
++DECLARE_EVENT_CLASS(block_get_rq,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
++
++ TP_ARGS(q, bio, rw),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( unsigned int, rwbs )
++ __array_text( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, bio ? bio->bi_bdev->bd_dev : 0)
++ tp_assign(sector, bio ? bio->bi_sector : 0)
++ tp_assign(nr_sector, bio ? bio->bi_size >> 9 : 0)
++ blk_fill_rwbs(rwbs, bio ? bio->bi_rw : 0,
++ bio ? bio->bi_size >> 9 : 0)
++ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
++ ),
++
++ TP_printk("%d,%d %s %llu + %u [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __print_rwbs_flags(__entry->rwbs),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->comm)
++)
++
++/**
++ * block_getrq - get a free request entry in queue for block IO operations
++ * @q: queue for operations
++ * @bio: pending block IO operation
++ * @rw: low bit indicates a read (%0) or a write (%1)
++ *
++ * A request struct for queue @q has been allocated to handle the
++ * block IO operation @bio.
++ */
++DEFINE_EVENT(block_get_rq, block_getrq,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
++
++ TP_ARGS(q, bio, rw)
++)
++
++/**
++ * block_sleeprq - waiting to get a free request entry in queue for block IO operation
++ * @q: queue for operation
++ * @bio: pending block IO operation
++ * @rw: low bit indicates a read (%0) or a write (%1)
++ *
++ * In the case where a request struct cannot be provided for queue @q
++ * the process needs to wait for a request struct to become
++ * available. This tracepoint event is generated each time the
++ * process goes to sleep waiting for a request struct to become available.
++ */
++DEFINE_EVENT(block_get_rq, block_sleeprq,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
++
++ TP_ARGS(q, bio, rw)
++)
++
++/**
++ * block_plug - keep operations requests in request queue
++ * @q: request queue to plug
++ *
++ * Plug the request queue @q. Do not allow block operation requests
++ * to be sent to the device driver. Instead, accumulate requests in
++ * the queue to improve throughput performance of the block device.
++ */
++TRACE_EVENT(block_plug,
++
++ TP_PROTO(struct request_queue *q),
++
++ TP_ARGS(q),
++
++ TP_STRUCT__entry(
++ __array_text( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
++ ),
++
++ TP_printk("[%s]", __entry->comm)
++)
++
++DECLARE_EVENT_CLASS(block_unplug,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
++
++ TP_ARGS(q, depth, explicit),
++#else
++ TP_PROTO(struct request_queue *q),
++
++ TP_ARGS(q),
++#endif
++
++ TP_STRUCT__entry(
++ __field( int, nr_rq )
++ __array_text( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ tp_assign(nr_rq, depth)
++#else
++ tp_assign(nr_rq, q->rq.count[READ] + q->rq.count[WRITE])
++#endif
++ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
++ ),
++
++ TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
++)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
++/**
++ * block_unplug_timer - timed release of operations requests in queue to device driver
++ * @q: request queue to unplug
++ *
++ * Unplug the request queue @q because a timer expired and allow block
++ * operation requests to be sent to the device driver.
++ */
++DEFINE_EVENT(block_unplug, block_unplug_timer,
++
++ TP_PROTO(struct request_queue *q),
++
++ TP_ARGS(q)
++)
++#endif
++
++/**
++ * block_unplug - release of operations requests in request queue
++ * @q: request queue to unplug
++ * @depth: number of requests just added to the queue
++ * @explicit: whether this was an explicit unplug, or one from schedule()
++ *
++ * Unplug request queue @q because device driver is scheduled to work
++ * on elements in the request queue.
++ */
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++DEFINE_EVENT(block_unplug, block_unplug,
++#else
++DEFINE_EVENT(block_unplug, block_unplug_io,
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
++
++ TP_ARGS(q, depth, explicit)
++#else
++ TP_PROTO(struct request_queue *q),
++
++ TP_ARGS(q)
++#endif
++)
++
++/**
++ * block_split - split a single bio struct into two bio structs
++ * @q: queue containing the bio
++ * @bio: block operation being split
++ * @new_sector: The starting sector for the new bio
++ *
++ * The bio request @bio in request queue @q needs to be split into two
++ * bio requests. The newly created @bio request starts at
++ * @new_sector. This split may be required due to hardware limitation
++ * such as operation crossing device boundaries in a RAID system.
++ */
++TRACE_EVENT(block_split,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio,
++ unsigned int new_sector),
++
++ TP_ARGS(q, bio, new_sector),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( sector_t, new_sector )
++ __field( unsigned int, rwbs )
++ __array_text( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, bio->bi_bdev->bd_dev)
++ tp_assign(sector, bio->bi_sector)
++ tp_assign(new_sector, new_sector)
++ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
++ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
++ ),
++
++ TP_printk("%d,%d %s %llu / %llu [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __print_rwbs_flags(__entry->rwbs),
++ (unsigned long long)__entry->sector,
++ (unsigned long long)__entry->new_sector,
++ __entry->comm)
++)
++
++/**
++ * block_bio_remap - map request for a logical device to the raw device
++ * @q: queue holding the operation
++ * @bio: revised operation
++ * @dev: device for the operation
++ * @from: original sector for the operation
++ *
++ * An operation for a logical device has been mapped to the
++ * raw block device.
++ */
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
++TRACE_EVENT(block_bio_remap,
++#else
++TRACE_EVENT(block_remap,
++#endif
++
++ TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
++ sector_t from),
++
++ TP_ARGS(q, bio, dev, from),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( dev_t, old_dev )
++ __field( sector_t, old_sector )
++ __field( unsigned int, rwbs )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, bio->bi_bdev->bd_dev)
++ tp_assign(sector, bio->bi_sector)
++ tp_assign(nr_sector, bio->bi_size >> 9)
++ tp_assign(old_dev, dev)
++ tp_assign(old_sector, from)
++ blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
++ ),
++
++ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __print_rwbs_flags(__entry->rwbs),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector,
++ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
++ (unsigned long long)__entry->old_sector)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
++/**
++ * block_rq_remap - map request for a block operation request
++ * @q: queue holding the operation
++ * @rq: block IO operation request
++ * @dev: device for the operation
++ * @from: original sector for the operation
++ *
++ * The block operation request @rq in @q has been remapped. The block
++ * operation request @rq holds the current information and @from holds
++ * the original sector.
++ */
++TRACE_EVENT(block_rq_remap,
++
++ TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
++ sector_t from),
++
++ TP_ARGS(q, rq, dev, from),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( dev_t, old_dev )
++ __field( sector_t, old_sector )
++ __field( unsigned int, rwbs )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, disk_devt(rq->rq_disk))
++ tp_assign(sector, blk_rq_pos(rq))
++ tp_assign(nr_sector, blk_rq_sectors(rq))
++ tp_assign(old_dev, dev)
++ tp_assign(old_sector, from)
++ blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
++ ),
++
++ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __print_rwbs_flags(__entry->rwbs),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector,
++ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
++ (unsigned long long)__entry->old_sector)
++)
++#endif
++
++#undef __print_rwbs_flags
++#undef blk_fill_rwbs
++
++#endif /* _TRACE_BLOCK_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
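Aside (illustrative, not part of the patch): unlike the in-kernel block tracepoints, which store the rwbs value as a character array, the events above store it as an integer bitmask and only render it at read time through __print_rwbs_flags. Tracing one worked case through the definitions above, on a >= 3.1 kernel:

    /* A synchronous write: rw = WRITE | REQ_SYNC, with a non-zero byte count.
     * blk_fill_rwbs() stores
     *     rwbs = RWBS_FLAG_WRITE | RWBS_FLAG_SYNC = (1 << 0) | (1 << 5) = 0x21
     * and __print_rwbs_flags(0x21) walks the flag table in order with an
     * empty delimiter, producing the string "WS" in the trace output.
     */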
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/btrfs.h
+@@ -0,0 +1,1117 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM btrfs
++
++#if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_BTRFS_H
++
++#include <linux/writeback.h>
++#include <linux/tracepoint.h>
++#include <trace/events/gfpflags.h>
++#include <linux/version.h>
++
++#ifndef _TRACE_BTRFS_DEF_
++#define _TRACE_BTRFS_DEF_
++struct btrfs_root;
++struct btrfs_fs_info;
++struct btrfs_inode;
++struct extent_map;
++struct btrfs_ordered_extent;
++struct btrfs_delayed_ref_node;
++struct btrfs_delayed_tree_ref;
++struct btrfs_delayed_data_ref;
++struct btrfs_delayed_ref_head;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++struct btrfs_block_group_cache;
++struct btrfs_free_cluster;
++#endif
++struct map_lookup;
++struct extent_buffer;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++struct extent_state;
++#endif
++#endif
++
++#define show_ref_type(type) \
++ __print_symbolic(type, \
++ { BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
++ { BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \
++ { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \
++ { BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \
++ { BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
++#define __show_root_type(obj) \
++ __print_symbolic_u64(obj, \
++ { BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
++ { BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
++ { BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
++ { BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" }, \
++ { BTRFS_FS_TREE_OBJECTID, "FS_TREE" }, \
++ { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \
++ { BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \
++ { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
++ { BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \
++ { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
++ { BTRFS_UUID_TREE_OBJECTID, "UUID_RELOC" }, \
++ { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++#define __show_root_type(obj) \
++ __print_symbolic_u64(obj, \
++ { BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
++ { BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
++ { BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
++ { BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" }, \
++ { BTRFS_FS_TREE_OBJECTID, "FS_TREE" }, \
++ { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \
++ { BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \
++ { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
++ { BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \
++ { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
++ { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,40))
++#define __show_root_type(obj) \
++ __print_symbolic_u64(obj, \
++ { BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
++ { BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
++ { BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
++ { BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" }, \
++ { BTRFS_FS_TREE_OBJECTID, "FS_TREE" }, \
++ { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \
++ { BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \
++ { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
++ { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
++ { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
++#else
++#define __show_root_type(obj) \
++ __print_symbolic(obj, \
++ { BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
++ { BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
++ { BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
++ { BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" }, \
++ { BTRFS_FS_TREE_OBJECTID, "FS_TREE" }, \
++ { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \
++ { BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \
++ { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
++ { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
++ { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++#define show_root_type(obj) \
++ obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \
++ (obj >= BTRFS_ROOT_TREE_OBJECTID && \
++ obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
++#define show_root_type(obj) \
++ obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \
++ (obj >= BTRFS_ROOT_TREE_OBJECTID && \
++ obj <= BTRFS_CSUM_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
++#define show_root_type(obj) \
++ obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \
++ (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-"
++#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++#define BTRFS_GROUP_FLAGS \
++ { BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
++ { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
++ { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
++ { BTRFS_BLOCK_GROUP_RAID0, "RAID0"}, \
++ { BTRFS_BLOCK_GROUP_RAID1, "RAID1"}, \
++ { BTRFS_BLOCK_GROUP_DUP, "DUP"}, \
++ { BTRFS_BLOCK_GROUP_RAID10, "RAID10"}, \
++ { BTRFS_BLOCK_GROUP_RAID5, "RAID5"}, \
++ { BTRFS_BLOCK_GROUP_RAID6, "RAID6"}
++
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++
++#define BTRFS_GROUP_FLAGS \
++ { BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
++ { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
++ { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
++ { BTRFS_BLOCK_GROUP_RAID0, "RAID0"}, \
++ { BTRFS_BLOCK_GROUP_RAID1, "RAID1"}, \
++ { BTRFS_BLOCK_GROUP_DUP, "DUP"}, \
++ { BTRFS_BLOCK_GROUP_RAID10, "RAID10"}
++
++#define BTRFS_UUID_SIZE 16
++
++#endif
++
++TRACE_EVENT(btrfs_transaction_commit,
++
++ TP_PROTO(struct btrfs_root *root),
++
++ TP_ARGS(root),
++
++ TP_STRUCT__entry(
++ __field( u64, generation )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ tp_assign(generation, root->fs_info->generation)
++ tp_assign(root_objectid, root->root_key.objectid)
++ ),
++
++ TP_printk("root = %llu(%s), gen = %llu",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->generation)
++)
++
++DECLARE_EVENT_CLASS(btrfs__inode,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( blkcnt_t, blocks )
++ __field( u64, disk_i_size )
++ __field( u64, generation )
++ __field( u64, last_trans )
++ __field( u64, logged_trans )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, inode->i_ino)
++ tp_assign(blocks, inode->i_blocks)
++ tp_assign(disk_i_size, BTRFS_I(inode)->disk_i_size)
++ tp_assign(generation, BTRFS_I(inode)->generation)
++ tp_assign(last_trans, BTRFS_I(inode)->last_trans)
++ tp_assign(logged_trans, BTRFS_I(inode)->logged_trans)
++ tp_assign(root_objectid,
++ BTRFS_I(inode)->root->root_key.objectid)
++ ),
++
++ TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
++ "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->generation,
++ (unsigned long)__entry->ino,
++ (unsigned long long)__entry->blocks,
++ (unsigned long long)__entry->disk_i_size,
++ (unsigned long long)__entry->last_trans,
++ (unsigned long long)__entry->logged_trans)
++)
++
++DEFINE_EVENT(btrfs__inode, btrfs_inode_new,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++)
++
++DEFINE_EVENT(btrfs__inode, btrfs_inode_request,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++)
++
++DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,40))
++#define __show_map_type(type) \
++ __print_symbolic_u64(type, \
++ { EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
++ { EXTENT_MAP_HOLE, "HOLE" }, \
++ { EXTENT_MAP_INLINE, "INLINE" }, \
++ { EXTENT_MAP_DELALLOC, "DELALLOC" })
++#else
++#define __show_map_type(type) \
++ __print_symbolic(type, \
++ { EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
++ { EXTENT_MAP_HOLE, "HOLE" }, \
++ { EXTENT_MAP_INLINE, "INLINE" }, \
++ { EXTENT_MAP_DELALLOC, "DELALLOC" })
++#endif
++
++#define show_map_type(type) \
++ type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++#define show_map_flags(flag) \
++ __print_flags(flag, "|", \
++ { EXTENT_FLAG_PINNED, "PINNED" }, \
++ { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \
++ { EXTENT_FLAG_VACANCY, "VACANCY" }, \
++ { EXTENT_FLAG_PREALLOC, "PREALLOC" }, \
++ { EXTENT_FLAG_LOGGING, "LOGGING" }, \
++ { EXTENT_FLAG_FILLING, "FILLING" })
++
++#else
++
++#define show_map_flags(flag) \
++ __print_flags(flag, "|", \
++ { EXTENT_FLAG_PINNED, "PINNED" }, \
++ { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \
++ { EXTENT_FLAG_VACANCY, "VACANCY" }, \
++ { EXTENT_FLAG_PREALLOC, "PREALLOC" })
++
++#endif
++
++TRACE_EVENT(btrfs_get_extent,
++
++ TP_PROTO(struct btrfs_root *root, struct extent_map *map),
++
++ TP_ARGS(root, map),
++
++ TP_STRUCT__entry(
++ __field( u64, root_objectid )
++ __field( u64, start )
++ __field( u64, len )
++ __field( u64, orig_start )
++ __field( u64, block_start )
++ __field( u64, block_len )
++ __field( unsigned long, flags )
++ __field( int, refs )
++ __field( unsigned int, compress_type )
++ ),
++
++ TP_fast_assign(
++ tp_assign(root_objectid, root->root_key.objectid)
++ tp_assign(start, map->start)
++ tp_assign(len, map->len)
++ tp_assign(orig_start, map->orig_start)
++ tp_assign(block_start, map->block_start)
++ tp_assign(block_len, map->block_len)
++ tp_assign(flags, map->flags)
++ tp_assign(refs, atomic_read(&map->refs))
++ tp_assign(compress_type, map->compress_type)
++ ),
++
++ TP_printk("root = %llu(%s), start = %llu, len = %llu, "
++ "orig_start = %llu, block_start = %llu(%s), "
++ "block_len = %llu, flags = %s, refs = %u, "
++ "compress_type = %u",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->start,
++ (unsigned long long)__entry->len,
++ (unsigned long long)__entry->orig_start,
++ show_map_type(__entry->block_start),
++ (unsigned long long)__entry->block_len,
++ show_map_flags(__entry->flags),
++ __entry->refs, __entry->compress_type)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++#define show_ordered_flags(flags) \
++ __print_symbolic(flags, \
++ { BTRFS_ORDERED_IO_DONE, "IO_DONE" }, \
++ { BTRFS_ORDERED_COMPLETE, "COMPLETE" }, \
++ { BTRFS_ORDERED_NOCOW, "NOCOW" }, \
++ { BTRFS_ORDERED_COMPRESSED, "COMPRESSED" }, \
++ { BTRFS_ORDERED_PREALLOC, "PREALLOC" }, \
++ { BTRFS_ORDERED_DIRECT, "DIRECT" }, \
++ { BTRFS_ORDERED_IOERR, "IOERR" }, \
++ { BTRFS_ORDERED_UPDATED_ISIZE, "UPDATED_ISIZE" }, \
++ { BTRFS_ORDERED_LOGGED_CSUM, "LOGGED_CSUM" })
++
++#else
++
++#define show_ordered_flags(flags) \
++ __print_symbolic(flags, \
++ { BTRFS_ORDERED_IO_DONE, "IO_DONE" }, \
++ { BTRFS_ORDERED_COMPLETE, "COMPLETE" }, \
++ { BTRFS_ORDERED_NOCOW, "NOCOW" }, \
++ { BTRFS_ORDERED_COMPRESSED, "COMPRESSED" }, \
++ { BTRFS_ORDERED_PREALLOC, "PREALLOC" }, \
++ { BTRFS_ORDERED_DIRECT, "DIRECT" })
++
++#endif
++
++DECLARE_EVENT_CLASS(btrfs__ordered_extent,
++
++ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
++
++ TP_ARGS(inode, ordered),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( u64, file_offset )
++ __field( u64, start )
++ __field( u64, len )
++ __field( u64, disk_len )
++ __field( u64, bytes_left )
++ __field( unsigned long, flags )
++ __field( int, compress_type )
++ __field( int, refs )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, inode->i_ino)
++ tp_assign(file_offset, ordered->file_offset)
++ tp_assign(start, ordered->start)
++ tp_assign(len, ordered->len)
++ tp_assign(disk_len, ordered->disk_len)
++ tp_assign(bytes_left, ordered->bytes_left)
++ tp_assign(flags, ordered->flags)
++ tp_assign(compress_type, ordered->compress_type)
++ tp_assign(refs, atomic_read(&ordered->refs))
++ tp_assign(root_objectid,
++ BTRFS_I(inode)->root->root_key.objectid)
++ ),
++
++ TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, "
++ "start = %llu, len = %llu, disk_len = %llu, "
++ "bytes_left = %llu, flags = %s, compress_type = %d, "
++ "refs = %d",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->ino,
++ (unsigned long long)__entry->file_offset,
++ (unsigned long long)__entry->start,
++ (unsigned long long)__entry->len,
++ (unsigned long long)__entry->disk_len,
++ (unsigned long long)__entry->bytes_left,
++ show_ordered_flags(__entry->flags),
++ __entry->compress_type, __entry->refs)
++)
++
++DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
++
++ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
++
++ TP_ARGS(inode, ordered)
++)
++
++DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
++
++ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
++
++ TP_ARGS(inode, ordered)
++)
++
++DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
++
++ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
++
++ TP_ARGS(inode, ordered)
++)
++
++DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
++
++ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
++
++ TP_ARGS(inode, ordered)
++)
++
++DECLARE_EVENT_CLASS(btrfs__writepage,
++
++ TP_PROTO(struct page *page, struct inode *inode,
++ struct writeback_control *wbc),
++
++ TP_ARGS(page, inode, wbc),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++ __field( long, nr_to_write )
++ __field( long, pages_skipped )
++ __field( loff_t, range_start )
++ __field( loff_t, range_end )
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
++ __field( char, nonblocking )
++#endif
++ __field( char, for_kupdate )
++ __field( char, for_reclaim )
++ __field( char, range_cyclic )
++ __field( pgoff_t, writeback_index )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, inode->i_ino)
++ tp_assign(index, page->index)
++ tp_assign(nr_to_write, wbc->nr_to_write)
++ tp_assign(pages_skipped, wbc->pages_skipped)
++ tp_assign(range_start, wbc->range_start)
++ tp_assign(range_end, wbc->range_end)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
++ tp_assign(nonblocking, wbc->nonblocking)
++#endif
++ tp_assign(for_kupdate, wbc->for_kupdate)
++ tp_assign(for_reclaim, wbc->for_reclaim)
++ tp_assign(range_cyclic, wbc->range_cyclic)
++ tp_assign(writeback_index, inode->i_mapping->writeback_index)
++ tp_assign(root_objectid,
++ BTRFS_I(inode)->root->root_key.objectid)
++ ),
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
++ TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
++ "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
++ "range_end = %llu, nonblocking = %d, for_kupdate = %d, "
++ "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
++ show_root_type(__entry->root_objectid),
++ (unsigned long)__entry->ino, __entry->index,
++ __entry->nr_to_write, __entry->pages_skipped,
++ __entry->range_start, __entry->range_end,
++ __entry->nonblocking, __entry->for_kupdate,
++ __entry->for_reclaim, __entry->range_cyclic,
++ (unsigned long)__entry->writeback_index)
++#else
++ TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
++ "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
++ "range_end = %llu, for_kupdate = %d, "
++ "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
++ show_root_type(__entry->root_objectid),
++ (unsigned long)__entry->ino, __entry->index,
++ __entry->nr_to_write, __entry->pages_skipped,
++ __entry->range_start, __entry->range_end,
++ __entry->for_kupdate,
++ __entry->for_reclaim, __entry->range_cyclic,
++ (unsigned long)__entry->writeback_index)
++#endif
++)
++
++DEFINE_EVENT(btrfs__writepage, __extent_writepage,
++
++ TP_PROTO(struct page *page, struct inode *inode,
++ struct writeback_control *wbc),
++
++ TP_ARGS(page, inode, wbc)
++)
++
++TRACE_EVENT(btrfs_writepage_end_io_hook,
++
++ TP_PROTO(struct page *page, u64 start, u64 end, int uptodate),
++
++ TP_ARGS(page, start, end, uptodate),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++ __field( u64, start )
++ __field( u64, end )
++ __field( int, uptodate )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, page->mapping->host->i_ino)
++ tp_assign(index, page->index)
++ tp_assign(start, start)
++ tp_assign(end, end)
++ tp_assign(uptodate, uptodate)
++ tp_assign(root_objectid,
++ BTRFS_I(page->mapping->host)->root->root_key.objectid)
++ ),
++
++ TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
++ "end = %llu, uptodate = %d",
++ show_root_type(__entry->root_objectid),
++ (unsigned long)__entry->ino, (unsigned long)__entry->index,
++ (unsigned long long)__entry->start,
++ (unsigned long long)__entry->end, __entry->uptodate)
++)
++
++TRACE_EVENT(btrfs_sync_file,
++
++ TP_PROTO(struct file *file, int datasync),
++
++ TP_ARGS(file, datasync),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( ino_t, parent )
++ __field( int, datasync )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, file->f_path.dentry->d_inode->i_ino)
++ tp_assign(parent, file->f_path.dentry->d_parent->d_inode->i_ino)
++ tp_assign(datasync, datasync)
++ tp_assign(root_objectid,
++ BTRFS_I(file->f_path.dentry->d_inode)->root->root_key.objectid)
++ ),
++
++ TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
++ show_root_type(__entry->root_objectid),
++ (unsigned long)__entry->ino, (unsigned long)__entry->parent,
++ __entry->datasync)
++)
++
++TRACE_EVENT(btrfs_sync_fs,
++
++ TP_PROTO(int wait),
++
++ TP_ARGS(wait),
++
++ TP_STRUCT__entry(
++ __field( int, wait )
++ ),
++
++ TP_fast_assign(
++ tp_assign(wait, wait)
++ ),
++
++ TP_printk("wait = %d", __entry->wait)
++)
++
++#define show_ref_action(action) \
++ __print_symbolic(action, \
++ { BTRFS_ADD_DELAYED_REF, "ADD_DELAYED_REF" }, \
++ { BTRFS_DROP_DELAYED_REF, "DROP_DELAYED_REF" }, \
++ { BTRFS_ADD_DELAYED_EXTENT, "ADD_DELAYED_EXTENT" }, \
++ { BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" })
++
++
++TRACE_EVENT(btrfs_delayed_tree_ref,
++
++ TP_PROTO(struct btrfs_delayed_ref_node *ref,
++ struct btrfs_delayed_tree_ref *full_ref,
++ int action),
++
++ TP_ARGS(ref, full_ref, action),
++
++ TP_STRUCT__entry(
++ __field( u64, bytenr )
++ __field( u64, num_bytes )
++ __field( int, action )
++ __field( u64, parent )
++ __field( u64, ref_root )
++ __field( int, level )
++ __field( int, type )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
++ __field( u64, seq )
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_assign(bytenr, ref->bytenr)
++ tp_assign(num_bytes, ref->num_bytes)
++ tp_assign(action, action)
++ tp_assign(parent, full_ref->parent)
++ tp_assign(ref_root, full_ref->root)
++ tp_assign(level, full_ref->level)
++ tp_assign(type, ref->type)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
++ tp_assign(seq, ref->seq)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
++ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
++ "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
++ "type = %s, seq = %llu",
++#else
++ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
++ "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
++ "type = %s",
++#endif
++ (unsigned long long)__entry->bytenr,
++ (unsigned long long)__entry->num_bytes,
++ show_ref_action(__entry->action),
++ show_root_type(__entry->parent),
++ show_root_type(__entry->ref_root),
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
++ __entry->level, show_ref_type(__entry->type),
++ (unsigned long long)__entry->seq)
++#else
++ __entry->level, show_ref_type(__entry->type))
++#endif
++)
++
++TRACE_EVENT(btrfs_delayed_data_ref,
++
++ TP_PROTO(struct btrfs_delayed_ref_node *ref,
++ struct btrfs_delayed_data_ref *full_ref,
++ int action),
++
++ TP_ARGS(ref, full_ref, action),
++
++ TP_STRUCT__entry(
++ __field( u64, bytenr )
++ __field( u64, num_bytes )
++ __field( int, action )
++ __field( u64, parent )
++ __field( u64, ref_root )
++ __field( u64, owner )
++ __field( u64, offset )
++ __field( int, type )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
++ __field( u64, seq )
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_assign(bytenr, ref->bytenr)
++ tp_assign(num_bytes, ref->num_bytes)
++ tp_assign(action, action)
++ tp_assign(parent, full_ref->parent)
++ tp_assign(ref_root, full_ref->root)
++ tp_assign(owner, full_ref->objectid)
++ tp_assign(offset, full_ref->offset)
++ tp_assign(type, ref->type)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
++ tp_assign(seq, ref->seq)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
++ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
++ "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
++ "offset = %llu, type = %s, seq = %llu",
++#else
++ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
++ "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
++ "offset = %llu, type = %s",
++#endif
++ (unsigned long long)__entry->bytenr,
++ (unsigned long long)__entry->num_bytes,
++ show_ref_action(__entry->action),
++ show_root_type(__entry->parent),
++ show_root_type(__entry->ref_root),
++ (unsigned long long)__entry->owner,
++ (unsigned long long)__entry->offset,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
++ show_ref_type(__entry->type),
++ (unsigned long long)__entry->seq)
++#else
++ show_ref_type(__entry->type))
++#endif
++)
++
++TRACE_EVENT(btrfs_delayed_ref_head,
++
++ TP_PROTO(struct btrfs_delayed_ref_node *ref,
++ struct btrfs_delayed_ref_head *head_ref,
++ int action),
++
++ TP_ARGS(ref, head_ref, action),
++
++ TP_STRUCT__entry(
++ __field( u64, bytenr )
++ __field( u64, num_bytes )
++ __field( int, action )
++ __field( int, is_data )
++ ),
++
++ TP_fast_assign(
++ tp_assign(bytenr, ref->bytenr)
++ tp_assign(num_bytes, ref->num_bytes)
++ tp_assign(action, action)
++ tp_assign(is_data, head_ref->is_data)
++ ),
++
++ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
++ (unsigned long long)__entry->bytenr,
++ (unsigned long long)__entry->num_bytes,
++ show_ref_action(__entry->action),
++ __entry->is_data)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++#define show_chunk_type(type) \
++ __print_flags(type, "|", \
++ { BTRFS_BLOCK_GROUP_DATA, "DATA" }, \
++ { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
++ { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
++ { BTRFS_BLOCK_GROUP_RAID0, "RAID0" }, \
++ { BTRFS_BLOCK_GROUP_RAID1, "RAID1" }, \
++ { BTRFS_BLOCK_GROUP_DUP, "DUP" }, \
++ { BTRFS_BLOCK_GROUP_RAID10, "RAID10"}, \
++ { BTRFS_BLOCK_GROUP_RAID5, "RAID5" }, \
++ { BTRFS_BLOCK_GROUP_RAID6, "RAID6" })
++
++#else
++
++#define show_chunk_type(type) \
++ __print_flags(type, "|", \
++ { BTRFS_BLOCK_GROUP_DATA, "DATA" }, \
++ { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
++ { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
++ { BTRFS_BLOCK_GROUP_RAID0, "RAID0" }, \
++ { BTRFS_BLOCK_GROUP_RAID1, "RAID1" }, \
++ { BTRFS_BLOCK_GROUP_DUP, "DUP" }, \
++ { BTRFS_BLOCK_GROUP_RAID10, "RAID10"})
++
++#endif
++
++DECLARE_EVENT_CLASS(btrfs__chunk,
++
++ TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
++ u64 offset, u64 size),
++
++ TP_ARGS(root, map, offset, size),
++
++ TP_STRUCT__entry(
++ __field( int, num_stripes )
++ __field( u64, type )
++ __field( int, sub_stripes )
++ __field( u64, offset )
++ __field( u64, size )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ tp_assign(num_stripes, map->num_stripes)
++ tp_assign(type, map->type)
++ tp_assign(sub_stripes, map->sub_stripes)
++ tp_assign(offset, offset)
++ tp_assign(size, size)
++ tp_assign(root_objectid, root->root_key.objectid)
++ ),
++
++ TP_printk("root = %llu(%s), offset = %llu, size = %llu, "
++ "num_stripes = %d, sub_stripes = %d, type = %s",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->offset,
++ (unsigned long long)__entry->size,
++ __entry->num_stripes, __entry->sub_stripes,
++ show_chunk_type(__entry->type))
++)
++
++DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
++
++ TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
++ u64 offset, u64 size),
++
++ TP_ARGS(root, map, offset, size)
++)
++
++DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
++
++ TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
++ u64 offset, u64 size),
++
++ TP_ARGS(root, map, offset, size)
++)
++
++TRACE_EVENT(btrfs_cow_block,
++
++ TP_PROTO(struct btrfs_root *root, struct extent_buffer *buf,
++ struct extent_buffer *cow),
++
++ TP_ARGS(root, buf, cow),
++
++ TP_STRUCT__entry(
++ __field( u64, root_objectid )
++ __field( u64, buf_start )
++ __field( int, refs )
++ __field( u64, cow_start )
++ __field( int, buf_level )
++ __field( int, cow_level )
++ ),
++
++ TP_fast_assign(
++ tp_assign(root_objectid, root->root_key.objectid)
++ tp_assign(buf_start, buf->start)
++ tp_assign(refs, atomic_read(&buf->refs))
++ tp_assign(cow_start, cow->start)
++ tp_assign(buf_level, btrfs_header_level(buf))
++ tp_assign(cow_level, btrfs_header_level(cow))
++ ),
++
++ TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu "
++ "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
++ show_root_type(__entry->root_objectid),
++ __entry->refs,
++ (unsigned long long)__entry->buf_start,
++ __entry->buf_level,
++ (unsigned long long)__entry->cow_start,
++ __entry->cow_level)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++TRACE_EVENT(btrfs_space_reservation,
++
++ TP_PROTO(struct btrfs_fs_info *fs_info, char *type, u64 val,
++ u64 bytes, int reserve),
++
++ TP_ARGS(fs_info, type, val, bytes, reserve),
++
++ TP_STRUCT__entry(
++ __array( u8, fsid, BTRFS_UUID_SIZE )
++ __string( type, type )
++ __field( u64, val )
++ __field( u64, bytes )
++ __field( int, reserve )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(fsid, fs_info->fsid, BTRFS_UUID_SIZE)
++ tp_strcpy(type, type)
++ tp_assign(val, val)
++ tp_assign(bytes, bytes)
++ tp_assign(reserve, reserve)
++ ),
++
++ TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type),
++ __entry->val, __entry->reserve ? "reserve" : "release",
++ __entry->bytes)
++)
++#endif
++
++DECLARE_EVENT_CLASS(btrfs__reserved_extent,
++
++ TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
++
++ TP_ARGS(root, start, len),
++
++ TP_STRUCT__entry(
++ __field( u64, root_objectid )
++ __field( u64, start )
++ __field( u64, len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(root_objectid, root->root_key.objectid)
++ tp_assign(start, start)
++ tp_assign(len, len)
++ ),
++
++ TP_printk("root = %llu(%s), start = %llu, len = %llu",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->start,
++ (unsigned long long)__entry->len)
++)
++
++DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc,
++
++ TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
++
++ TP_ARGS(root, start, len)
++)
++
++DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
++
++ TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
++
++ TP_ARGS(root, start, len)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++TRACE_EVENT_MAP(find_free_extent,
++
++ btrfs_find_free_extent,
++
++ TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
++ u64 data),
++
++ TP_ARGS(root, num_bytes, empty_size, data),
++
++ TP_STRUCT__entry(
++ __field( u64, root_objectid )
++ __field( u64, num_bytes )
++ __field( u64, empty_size )
++ __field( u64, data )
++ ),
++
++ TP_fast_assign(
++ tp_assign(root_objectid, root->root_key.objectid)
++ tp_assign(num_bytes, num_bytes)
++ tp_assign(empty_size, empty_size)
++ tp_assign(data, data)
++ ),
++
++ TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
++ "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
++ __entry->num_bytes, __entry->empty_size, __entry->data,
++ __print_flags((unsigned long)__entry->data, "|",
++ BTRFS_GROUP_FLAGS))
++)
++
++DECLARE_EVENT_CLASS(btrfs__reserve_extent,
++
++ TP_PROTO(struct btrfs_root *root,
++ struct btrfs_block_group_cache *block_group, u64 start,
++ u64 len),
++
++ TP_ARGS(root, block_group, start, len),
++
++ TP_STRUCT__entry(
++ __field( u64, root_objectid )
++ __field( u64, bg_objectid )
++ __field( u64, flags )
++ __field( u64, start )
++ __field( u64, len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(root_objectid, root->root_key.objectid)
++ tp_assign(bg_objectid, block_group->key.objectid)
++ tp_assign(flags, block_group->flags)
++ tp_assign(start, start)
++ tp_assign(len, len)
++ ),
++
++ TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
++ "start = %Lu, len = %Lu",
++ show_root_type(__entry->root_objectid), __entry->bg_objectid,
++ __entry->flags, __print_flags((unsigned long)__entry->flags,
++ "|", BTRFS_GROUP_FLAGS),
++ __entry->start, __entry->len)
++)
++
++DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
++
++ TP_PROTO(struct btrfs_root *root,
++ struct btrfs_block_group_cache *block_group, u64 start,
++ u64 len),
++
++ TP_ARGS(root, block_group, start, len)
++)
++
++DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
++
++ TP_PROTO(struct btrfs_root *root,
++ struct btrfs_block_group_cache *block_group, u64 start,
++ u64 len),
++
++ TP_ARGS(root, block_group, start, len)
++)
++
++TRACE_EVENT(btrfs_find_cluster,
++
++ TP_PROTO(struct btrfs_block_group_cache *block_group, u64 start,
++ u64 bytes, u64 empty_size, u64 min_bytes),
++
++ TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
++
++ TP_STRUCT__entry(
++ __field( u64, bg_objectid )
++ __field( u64, flags )
++ __field( u64, start )
++ __field( u64, bytes )
++ __field( u64, empty_size )
++ __field( u64, min_bytes )
++ ),
++
++ TP_fast_assign(
++ tp_assign(bg_objectid, block_group->key.objectid)
++ tp_assign(flags, block_group->flags)
++ tp_assign(start, start)
++ tp_assign(bytes, bytes)
++ tp_assign(empty_size, empty_size)
++ tp_assign(min_bytes, min_bytes)
++ ),
++
++ TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
++ " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
++ __entry->flags,
++ __print_flags((unsigned long)__entry->flags, "|",
++ BTRFS_GROUP_FLAGS), __entry->start,
++ __entry->bytes, __entry->empty_size, __entry->min_bytes)
++)
++
++TRACE_EVENT(btrfs_failed_cluster_setup,
++
++ TP_PROTO(struct btrfs_block_group_cache *block_group),
++
++ TP_ARGS(block_group),
++
++ TP_STRUCT__entry(
++ __field( u64, bg_objectid )
++ ),
++
++ TP_fast_assign(
++ tp_assign(bg_objectid, block_group->key.objectid)
++ ),
++
++ TP_printk("block_group = %Lu", __entry->bg_objectid)
++)
++
++TRACE_EVENT(btrfs_setup_cluster,
++
++ TP_PROTO(struct btrfs_block_group_cache *block_group,
++ struct btrfs_free_cluster *cluster, u64 size, int bitmap),
++
++ TP_ARGS(block_group, cluster, size, bitmap),
++
++ TP_STRUCT__entry(
++ __field( u64, bg_objectid )
++ __field( u64, flags )
++ __field( u64, start )
++ __field( u64, max_size )
++ __field( u64, size )
++ __field( int, bitmap )
++ ),
++
++ TP_fast_assign(
++ tp_assign(bg_objectid, block_group->key.objectid)
++ tp_assign(flags, block_group->flags)
++ tp_assign(start, cluster->window_start)
++ tp_assign(max_size, cluster->max_size)
++ tp_assign(size, size)
++ tp_assign(bitmap, bitmap)
++ ),
++
++ TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
++ "size = %Lu, max_size = %Lu, bitmap = %d",
++ __entry->bg_objectid,
++ __entry->flags,
++ __print_flags((unsigned long)__entry->flags, "|",
++ BTRFS_GROUP_FLAGS), __entry->start,
++ __entry->size, __entry->max_size, __entry->bitmap)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++TRACE_EVENT_MAP(alloc_extent_state,
++
++ btrfs_alloc_extent_state,
++
++ TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
++
++ TP_ARGS(state, mask, IP),
++
++ TP_STRUCT__entry(
++ __field(struct extent_state *, state)
++ __field(gfp_t, mask)
++ __field(unsigned long, ip)
++ ),
++
++ TP_fast_assign(
++ tp_assign(state, state)
++ tp_assign(mask, mask)
++ tp_assign(ip, IP)
++ ),
++
++ TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
++ show_gfp_flags(__entry->mask), (void *)__entry->ip)
++)
++
++TRACE_EVENT_MAP(free_extent_state,
++
++ btrfs_free_extent_state,
++
++ TP_PROTO(struct extent_state *state, unsigned long IP),
++
++ TP_ARGS(state, IP),
++
++ TP_STRUCT__entry(
++ __field(struct extent_state *, state)
++ __field(unsigned long, ip)
++ ),
++
++ TP_fast_assign(
++ tp_assign(state, state)
++ tp_assign(ip, IP)
++ ),
++
++ TP_printk(" state=%p; caller = %pF", __entry->state,
++ (void *)__entry->ip)
++)
++#endif
++
++#endif /* _TRACE_BTRFS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/compaction.h
+@@ -0,0 +1,74 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM compaction
++
++#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_COMPACTION_H
++
++#include <linux/types.h>
++#include <linux/tracepoint.h>
++#include <trace/events/gfpflags.h>
++
++DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
++
++ TP_PROTO(unsigned long nr_scanned,
++ unsigned long nr_taken),
++
++ TP_ARGS(nr_scanned, nr_taken),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, nr_scanned)
++ __field(unsigned long, nr_taken)
++ ),
++
++ TP_fast_assign(
++ tp_assign(nr_scanned, nr_scanned)
++ tp_assign(nr_taken, nr_taken)
++ ),
++
++ TP_printk("nr_scanned=%lu nr_taken=%lu",
++ __entry->nr_scanned,
++ __entry->nr_taken)
++)
++
++DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
++
++ TP_PROTO(unsigned long nr_scanned,
++ unsigned long nr_taken),
++
++ TP_ARGS(nr_scanned, nr_taken)
++)
++
++DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
++ TP_PROTO(unsigned long nr_scanned,
++ unsigned long nr_taken),
++
++ TP_ARGS(nr_scanned, nr_taken)
++)
++
++TRACE_EVENT(mm_compaction_migratepages,
++
++ TP_PROTO(unsigned long nr_migrated,
++ unsigned long nr_failed),
++
++ TP_ARGS(nr_migrated, nr_failed),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, nr_migrated)
++ __field(unsigned long, nr_failed)
++ ),
++
++ TP_fast_assign(
++ tp_assign(nr_migrated, nr_migrated)
++ tp_assign(nr_failed, nr_failed)
++ ),
++
++ TP_printk("nr_migrated=%lu nr_failed=%lu",
++ __entry->nr_migrated,
++ __entry->nr_failed)
++)
++
++
++#endif /* _TRACE_COMPACTION_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/ext3.h
+@@ -0,0 +1,902 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM ext3
++
++#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_EXT3_H
++
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++TRACE_EVENT(ext3_free_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( umode_t, mode )
++ __field( uid_t, uid )
++ __field( gid_t, gid )
++ __field( blkcnt_t, blocks )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(mode, inode->i_mode)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
++ tp_assign(uid, i_uid_read(inode))
++ tp_assign(gid, i_gid_read(inode))
++#else
++ tp_assign(uid, inode->i_uid)
++ tp_assign(gid, inode->i_gid)
++#endif
++ tp_assign(blocks, inode->i_blocks)
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->uid, __entry->gid,
++ (unsigned long) __entry->blocks)
++)
++
++TRACE_EVENT(ext3_request_inode,
++ TP_PROTO(struct inode *dir, int mode),
++
++ TP_ARGS(dir, mode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, dir )
++ __field( umode_t, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, dir->i_sb->s_dev)
++ tp_assign(dir, dir->i_ino)
++ tp_assign(mode, mode)
++ ),
++
++ TP_printk("dev %d,%d dir %lu mode 0%o",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->dir, __entry->mode)
++)
++
++TRACE_EVENT(ext3_allocate_inode,
++ TP_PROTO(struct inode *inode, struct inode *dir, int mode),
++
++ TP_ARGS(inode, dir, mode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ino_t, dir )
++ __field( umode_t, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(dir, dir->i_ino)
++ tp_assign(mode, mode)
++ ),
++
++ TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->dir, __entry->mode)
++)
++
++TRACE_EVENT(ext3_evict_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, nlink )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(nlink, inode->i_nlink)
++ ),
++
++ TP_printk("dev %d,%d ino %lu nlink %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->nlink)
++)
++
++TRACE_EVENT(ext3_drop_inode,
++ TP_PROTO(struct inode *inode, int drop),
++
++ TP_ARGS(inode, drop),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, drop )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(drop, drop)
++ ),
++
++ TP_printk("dev %d,%d ino %lu drop %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->drop)
++)
++
++TRACE_EVENT(ext3_mark_inode_dirty,
++ TP_PROTO(struct inode *inode, unsigned long IP),
++
++ TP_ARGS(inode, IP),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field(unsigned long, ip )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(ip, IP)
++ ),
++
++ TP_printk("dev %d,%d ino %lu caller %pF",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, (void *)__entry->ip)
++)
++
++TRACE_EVENT(ext3_write_begin,
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int flags),
++
++ TP_ARGS(inode, pos, len, flags),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned int, len )
++ __field( unsigned int, flags )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pos, pos)
++ tp_assign(len, len)
++ tp_assign(flags, flags)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long) __entry->pos, __entry->len,
++ __entry->flags)
++)
++
++DECLARE_EVENT_CLASS(ext3__write_end,
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned int, len )
++ __field( unsigned int, copied )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pos, pos)
++ tp_assign(len, len)
++ tp_assign(copied, copied)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long) __entry->pos, __entry->len,
++ __entry->copied)
++)
++
++DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++)
++
++DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++)
++
++DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++)
++
++DECLARE_EVENT_CLASS(ext3__page_op,
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(index, page->index)
++ tp_assign(ino, page->mapping->host->i_ino)
++ tp_assign(dev, page->mapping->host->i_sb->s_dev)
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->index)
++)
++
++DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++)
++
++DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++)
++
++DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++)
++
++DEFINE_EVENT(ext3__page_op, ext3_readpage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++)
++
++DEFINE_EVENT(ext3__page_op, ext3_releasepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++TRACE_EVENT(ext3_invalidatepage,
++ TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
++
++ TP_ARGS(page, offset, length),
++
++ TP_STRUCT__entry(
++ __field( pgoff_t, index )
++ __field( unsigned int, offset )
++ __field( unsigned int, length )
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(index, page->index)
++ tp_assign(offset, offset)
++ tp_assign(length, length)
++ tp_assign(ino, page->mapping->host->i_ino)
++ tp_assign(dev, page->mapping->host->i_sb->s_dev)
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->index, __entry->offset, __entry->length)
++)
++
++#else
++
++TRACE_EVENT(ext3_invalidatepage,
++ TP_PROTO(struct page *page, unsigned long offset),
++
++ TP_ARGS(page, offset),
++
++ TP_STRUCT__entry(
++ __field( pgoff_t, index )
++ __field( unsigned long, offset )
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(index, page->index)
++ tp_assign(offset, offset)
++ tp_assign(ino, page->mapping->host->i_ino)
++ tp_assign(dev, page->mapping->host->i_sb->s_dev)
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->index, __entry->offset)
++)
++
++#endif
++
++TRACE_EVENT(ext3_discard_blocks,
++ TP_PROTO(struct super_block *sb, unsigned long blk,
++ unsigned long count),
++
++ TP_ARGS(sb, blk, count),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( unsigned long, blk )
++ __field( unsigned long, count )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(blk, blk)
++ tp_assign(count, count)
++ ),
++
++ TP_printk("dev %d,%d blk %lu count %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->blk, __entry->count)
++)
++
++TRACE_EVENT(ext3_request_blocks,
++ TP_PROTO(struct inode *inode, unsigned long goal,
++ unsigned long count),
++
++ TP_ARGS(inode, goal, count),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( unsigned long, count )
++ __field( unsigned long, goal )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(count, count)
++ tp_assign(goal, goal)
++ ),
++
++ TP_printk("dev %d,%d ino %lu count %lu goal %lu ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->count, __entry->goal)
++)
++
++TRACE_EVENT(ext3_allocate_blocks,
++ TP_PROTO(struct inode *inode, unsigned long goal,
++ unsigned long count, unsigned long block),
++
++ TP_ARGS(inode, goal, count, block),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( unsigned long, block )
++ __field( unsigned long, count )
++ __field( unsigned long, goal )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(block, block)
++ tp_assign(count, count)
++ tp_assign(goal, goal)
++ ),
++
++ TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->count, __entry->block,
++ __entry->goal)
++)
++
++TRACE_EVENT(ext3_free_blocks,
++ TP_PROTO(struct inode *inode, unsigned long block,
++ unsigned long count),
++
++ TP_ARGS(inode, block, count),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( umode_t, mode )
++ __field( unsigned long, block )
++ __field( unsigned long, count )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(mode, inode->i_mode)
++ tp_assign(block, block)
++ tp_assign(count, count)
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->block, __entry->count)
++)
++
++TRACE_EVENT(ext3_sync_file_enter,
++ TP_PROTO(struct file *file, int datasync),
++
++ TP_ARGS(file, datasync),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ino_t, parent )
++ __field( int, datasync )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, file->f_path.dentry->d_inode->i_sb->s_dev)
++ tp_assign(ino, file->f_path.dentry->d_inode->i_ino)
++ tp_assign(datasync, datasync)
++ tp_assign(parent, file->f_path.dentry->d_parent->d_inode->i_ino)
++ ),
++
++ TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->parent, __entry->datasync)
++)
++
++TRACE_EVENT(ext3_sync_file_exit,
++ TP_PROTO(struct inode *inode, int ret),
++
++ TP_ARGS(inode, ret),
++
++ TP_STRUCT__entry(
++ __field( int, ret )
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ret, ret)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(dev, inode->i_sb->s_dev)
++ ),
++
++ TP_printk("dev %d,%d ino %lu ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->ret)
++)
++
++TRACE_EVENT(ext3_sync_fs,
++ TP_PROTO(struct super_block *sb, int wait),
++
++ TP_ARGS(sb, wait),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, wait )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(wait, wait)
++ ),
++
++ TP_printk("dev %d,%d wait %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->wait)
++)
++
++TRACE_EVENT(ext3_rsv_window_add,
++ TP_PROTO(struct super_block *sb,
++ struct ext3_reserve_window_node *rsv_node),
++
++ TP_ARGS(sb, rsv_node),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, start )
++ __field( unsigned long, end )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(start, rsv_node->rsv_window._rsv_start)
++ tp_assign(end, rsv_node->rsv_window._rsv_end)
++ ),
++
++ TP_printk("dev %d,%d start %lu end %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->start, __entry->end)
++)
++
++TRACE_EVENT(ext3_discard_reservation,
++ TP_PROTO(struct inode *inode,
++ struct ext3_reserve_window_node *rsv_node),
++
++ TP_ARGS(inode, rsv_node),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, start )
++ __field( unsigned long, end )
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ tp_assign(start, rsv_node->rsv_window._rsv_start)
++ tp_assign(end, rsv_node->rsv_window._rsv_end)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(dev, inode->i_sb->s_dev)
++ ),
++
++ TP_printk("dev %d,%d ino %lu start %lu end %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long)__entry->ino, __entry->start,
++ __entry->end)
++)
++
++TRACE_EVENT(ext3_alloc_new_reservation,
++ TP_PROTO(struct super_block *sb, unsigned long goal),
++
++ TP_ARGS(sb, goal),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( unsigned long, goal )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(goal, goal)
++ ),
++
++ TP_printk("dev %d,%d goal %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->goal)
++)
++
++TRACE_EVENT(ext3_reserved,
++ TP_PROTO(struct super_block *sb, unsigned long block,
++ struct ext3_reserve_window_node *rsv_node),
++
++ TP_ARGS(sb, block, rsv_node),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, block )
++ __field( unsigned long, start )
++ __field( unsigned long, end )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ tp_assign(block, block)
++ tp_assign(start, rsv_node->rsv_window._rsv_start)
++ tp_assign(end, rsv_node->rsv_window._rsv_end)
++ tp_assign(dev, sb->s_dev)
++ ),
++
++ TP_printk("dev %d,%d block %lu, start %lu end %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->block, __entry->start, __entry->end)
++)
++
++TRACE_EVENT(ext3_forget,
++ TP_PROTO(struct inode *inode, int is_metadata, unsigned long block),
++
++ TP_ARGS(inode, is_metadata, block),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( umode_t, mode )
++ __field( int, is_metadata )
++ __field( unsigned long, block )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(mode, inode->i_mode)
++ tp_assign(is_metadata, is_metadata)
++ tp_assign(block, block)
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->is_metadata, __entry->block)
++)
++
++TRACE_EVENT(ext3_read_block_bitmap,
++ TP_PROTO(struct super_block *sb, unsigned int group),
++
++ TP_ARGS(sb, group),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( __u32, group )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(group, group)
++ ),
++
++ TP_printk("dev %d,%d group %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->group)
++)
++
++TRACE_EVENT(ext3_direct_IO_enter,
++ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
++
++ TP_ARGS(inode, offset, len, rw),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( loff_t, pos )
++ __field( unsigned long, len )
++ __field( int, rw )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, inode->i_ino)
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(pos, offset)
++ tp_assign(len, len)
++ tp_assign(rw, rw)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long) __entry->pos, __entry->len,
++ __entry->rw)
++)
++
++TRACE_EVENT(ext3_direct_IO_exit,
++ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
++ int rw, int ret),
++
++ TP_ARGS(inode, offset, len, rw, ret),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( loff_t, pos )
++ __field( unsigned long, len )
++ __field( int, rw )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, inode->i_ino)
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(pos, offset)
++ tp_assign(len, len)
++ tp_assign(rw, rw)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long) __entry->pos, __entry->len,
++ __entry->rw, __entry->ret)
++)
++
++TRACE_EVENT(ext3_unlink_enter,
++ TP_PROTO(struct inode *parent, struct dentry *dentry),
++
++ TP_ARGS(parent, dentry),
++
++ TP_STRUCT__entry(
++ __field( ino_t, parent )
++ __field( ino_t, ino )
++ __field( loff_t, size )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ tp_assign(parent, parent->i_ino)
++ tp_assign(ino, dentry->d_inode->i_ino)
++ tp_assign(size, dentry->d_inode->i_size)
++ tp_assign(dev, dentry->d_inode->i_sb->s_dev)
++ ),
++
++ TP_printk("dev %d,%d ino %lu size %lld parent %ld",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long)__entry->size,
++ (unsigned long) __entry->parent)
++)
++
++TRACE_EVENT(ext3_unlink_exit,
++ TP_PROTO(struct dentry *dentry, int ret),
++
++ TP_ARGS(dentry, ret),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, dentry->d_inode->i_ino)
++ tp_assign(dev, dentry->d_inode->i_sb->s_dev)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->ret)
++)
++
++DECLARE_EVENT_CLASS(ext3__truncate,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( blkcnt_t, blocks )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, inode->i_ino)
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(blocks, inode->i_blocks)
++ ),
++
++ TP_printk("dev %d,%d ino %lu blocks %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
++)
++
++DEFINE_EVENT(ext3__truncate, ext3_truncate_enter,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++)
++
++DEFINE_EVENT(ext3__truncate, ext3_truncate_exit,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++)
++
++TRACE_EVENT(ext3_get_blocks_enter,
++ TP_PROTO(struct inode *inode, unsigned long lblk,
++ unsigned long len, int create),
++
++ TP_ARGS(inode, lblk, len, create),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( unsigned long, lblk )
++ __field( unsigned long, len )
++ __field( int, create )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, inode->i_ino)
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(lblk, lblk)
++ tp_assign(len, len)
++ tp_assign(create, create)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->len, __entry->create)
++)
++
++TRACE_EVENT(ext3_get_blocks_exit,
++ TP_PROTO(struct inode *inode, unsigned long lblk,
++ unsigned long pblk, unsigned long len, int ret),
++
++ TP_ARGS(inode, lblk, pblk, len, ret),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( unsigned long, lblk )
++ __field( unsigned long, pblk )
++ __field( unsigned long, len )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, inode->i_ino)
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(lblk, lblk)
++ tp_assign(pblk, pblk)
++ tp_assign(len, len)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->pblk,
++ __entry->len, __entry->ret)
++)
++
++TRACE_EVENT(ext3_load_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ino, inode->i_ino)
++ tp_assign(dev, inode->i_sb->s_dev)
++ ),
++
++ TP_printk("dev %d,%d ino %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino)
++)
++
++#endif /* _TRACE_EXT3_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/ext4.h
+@@ -0,0 +1,3130 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM ext4
++
++#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_EXT4_H
++
++#include <linux/writeback.h>
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++#ifndef _TRACE_EXT4_DEF_
++#define _TRACE_EXT4_DEF_
++struct ext4_allocation_context;
++struct ext4_allocation_request;
++struct ext4_prealloc_space;
++struct ext4_inode_info;
++struct mpage_da_data;
++struct ext4_map_blocks;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++struct ext4_extent;
++#endif
++#endif
++
++#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
++#define TP_MODE_T __u16
++#else
++#define TP_MODE_T umode_t
++#endif
++
++TRACE_EVENT(ext4_free_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( uid_t, uid )
++ __field( gid_t, gid )
++ __field( __u64, blocks )
++ __field( TP_MODE_T, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
++ tp_assign(uid, i_uid_read(inode))
++ tp_assign(gid, i_gid_read(inode))
++#else
++ tp_assign(uid, inode->i_uid)
++ tp_assign(gid, inode->i_gid)
++#endif
++ tp_assign(blocks, inode->i_blocks)
++ tp_assign(mode, inode->i_mode)
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->mode,
++ __entry->uid, __entry->gid, __entry->blocks)
++)
++
++TRACE_EVENT(ext4_request_inode,
++ TP_PROTO(struct inode *dir, int mode),
++
++ TP_ARGS(dir, mode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, dir )
++ __field( TP_MODE_T, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, dir->i_sb->s_dev)
++ tp_assign(dir, dir->i_ino)
++ tp_assign(mode, mode)
++ ),
++
++ TP_printk("dev %d,%d dir %lu mode 0%o",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->dir, __entry->mode)
++)
++
++TRACE_EVENT(ext4_allocate_inode,
++ TP_PROTO(struct inode *inode, struct inode *dir, int mode),
++
++ TP_ARGS(inode, dir, mode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ino_t, dir )
++ __field( TP_MODE_T, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(dir, dir->i_ino)
++ tp_assign(mode, mode)
++ ),
++
++ TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->dir, __entry->mode)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++TRACE_EVENT(ext4_evict_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, nlink )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(nlink, inode->i_nlink)
++ ),
++
++ TP_printk("dev %d,%d ino %lu nlink %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->nlink)
++)
++
++TRACE_EVENT(ext4_drop_inode,
++ TP_PROTO(struct inode *inode, int drop),
++
++ TP_ARGS(inode, drop),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, drop )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(drop, drop)
++ ),
++
++ TP_printk("dev %d,%d ino %lu drop %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->drop)
++)
++
++TRACE_EVENT(ext4_mark_inode_dirty,
++ TP_PROTO(struct inode *inode, unsigned long IP),
++
++ TP_ARGS(inode, IP),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field(unsigned long, ip )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(ip, IP)
++ ),
++
++ TP_printk("dev %d,%d ino %lu caller %pF",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, (void *)__entry->ip)
++)
++
++TRACE_EVENT(ext4_begin_ordered_truncate,
++ TP_PROTO(struct inode *inode, loff_t new_size),
++
++ TP_ARGS(inode, new_size),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, new_size )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(new_size, new_size)
++ ),
++
++ TP_printk("dev %d,%d ino %lu new_size %lld",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->new_size)
++)
++#endif
++
++DECLARE_EVENT_CLASS(ext4__write_begin,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int flags),
++
++ TP_ARGS(inode, pos, len, flags),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned int, len )
++ __field( unsigned int, flags )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pos, pos)
++ tp_assign(len, len)
++ tp_assign(flags, flags)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pos, __entry->len, __entry->flags)
++)
++
++DEFINE_EVENT(ext4__write_begin, ext4_write_begin,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int flags),
++
++ TP_ARGS(inode, pos, len, flags)
++)
++
++DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int flags),
++
++ TP_ARGS(inode, pos, len, flags)
++)
++
++DECLARE_EVENT_CLASS(ext4__write_end,
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned int, len )
++ __field( unsigned int, copied )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pos, pos)
++ tp_assign(len, len)
++ tp_assign(copied, copied)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pos, __entry->len, __entry->copied)
++)
++
++DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++)
++
++DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++)
++
++DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++)
++
++DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40))
++TRACE_EVENT(ext4_writepage,
++ TP_PROTO(struct inode *inode, struct page *page),
++
++ TP_ARGS(inode, page),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(index, page->index)
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->index)
++)
++#endif
++
++TRACE_EVENT(ext4_da_writepages,
++ TP_PROTO(struct inode *inode, struct writeback_control *wbc),
++
++ TP_ARGS(inode, wbc),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( long, nr_to_write )
++ __field( long, pages_skipped )
++ __field( loff_t, range_start )
++ __field( loff_t, range_end )
++ __field( pgoff_t, writeback_index )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ __field( int, sync_mode )
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
++ __field( char, nonblocking )
++#endif
++ __field( char, for_kupdate )
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
++ __field( char, for_reclaim )
++#endif
++ __field( char, range_cyclic )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(nr_to_write, wbc->nr_to_write)
++ tp_assign(pages_skipped, wbc->pages_skipped)
++ tp_assign(range_start, wbc->range_start)
++ tp_assign(range_end, wbc->range_end)
++ tp_assign(writeback_index, inode->i_mapping->writeback_index)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ tp_assign(sync_mode, wbc->sync_mode)
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
++ tp_assign(nonblocking, wbc->nonblocking)
++#endif
++ tp_assign(for_kupdate, wbc->for_kupdate)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
++ tp_assign(for_reclaim, wbc->for_reclaim)
++#endif
++ tp_assign(range_cyclic, wbc->range_cyclic)
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
++ "range_start %lld range_end %lld sync_mode %d "
++ "for_kupdate %d range_cyclic %d writeback_index %lu",
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
++ "range_start %llu range_end %llu "
++ "for_kupdate %d for_reclaim %d "
++ "range_cyclic %d writeback_index %lu",
++#else
++ TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
++ "range_start %llu range_end %llu "
++ "nonblocking %d for_kupdate %d for_reclaim %d "
++ "range_cyclic %d writeback_index %lu",
++#endif
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->nr_to_write,
++ __entry->pages_skipped, __entry->range_start,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ __entry->range_end, __entry->sync_mode,
++ __entry->for_kupdate, __entry->range_cyclic,
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ __entry->range_end,
++ __entry->for_kupdate, __entry->for_reclaim,
++ __entry->range_cyclic,
++#else
++ __entry->range_end, __entry->nonblocking,
++ __entry->for_kupdate, __entry->for_reclaim,
++ __entry->range_cyclic,
++#endif
++ (unsigned long) __entry->writeback_index)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++TRACE_EVENT(ext4_da_write_pages,
++ TP_PROTO(struct inode *inode, pgoff_t first_page,
++ struct writeback_control *wbc),
++
++ TP_ARGS(inode, first_page, wbc),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( pgoff_t, first_page )
++ __field( long, nr_to_write )
++ __field( int, sync_mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(first_page, first_page)
++ tp_assign(nr_to_write, wbc->nr_to_write)
++ tp_assign(sync_mode, wbc->sync_mode)
++ ),
++
++ TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld "
++ "sync_mode %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->first_page,
++ __entry->nr_to_write, __entry->sync_mode)
++)
++
++#else
++
++TRACE_EVENT(ext4_da_write_pages,
++ TP_PROTO(struct inode *inode, struct mpage_da_data *mpd),
++
++ TP_ARGS(inode, mpd),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, b_blocknr )
++ __field( __u32, b_size )
++ __field( __u32, b_state )
++ __field( unsigned long, first_page )
++ __field( int, io_done )
++ __field( int, pages_written )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ __field( int, sync_mode )
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(b_blocknr, mpd->b_blocknr)
++ tp_assign(b_size, mpd->b_size)
++ tp_assign(b_state, mpd->b_state)
++ tp_assign(first_page, mpd->first_page)
++ tp_assign(io_done, mpd->io_done)
++ tp_assign(pages_written, mpd->pages_written)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ tp_assign(sync_mode, mpd->wbc->sync_mode)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
++ "first_page %lu io_done %d pages_written %d sync_mode %d",
++#else
++ TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
++ "first_page %lu io_done %d pages_written %d",
++#endif
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->b_blocknr, __entry->b_size,
++ __entry->b_state, __entry->first_page,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ __entry->io_done, __entry->pages_written,
++ __entry->sync_mode
++#else
++ __entry->io_done, __entry->pages_written
++#endif
++ )
++)
++
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++TRACE_EVENT(ext4_da_write_pages_extent,
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map),
++
++ TP_ARGS(inode, map),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, lblk )
++ __field( __u32, len )
++ __field( __u32, flags )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, map->m_lblk)
++ tp_assign(len, map->m_len)
++ tp_assign(flags, map->m_flags)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %llu len %u flags %s",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->lblk, __entry->len,
++ show_mflags(__entry->flags))
++)
++
++#endif
++
++TRACE_EVENT(ext4_da_writepages_result,
++ TP_PROTO(struct inode *inode, struct writeback_control *wbc,
++ int ret, int pages_written),
++
++ TP_ARGS(inode, wbc, ret, pages_written),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, ret )
++ __field( int, pages_written )
++ __field( long, pages_skipped )
++ __field( pgoff_t, writeback_index )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ __field( int, sync_mode )
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
++ __field( char, encountered_congestion )
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
++ __field( char, more_io )
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ __field( char, no_nrwrite_index_update )
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(ret, ret)
++ tp_assign(pages_written, pages_written)
++ tp_assign(pages_skipped, wbc->pages_skipped)
++ tp_assign(writeback_index, inode->i_mapping->writeback_index)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ tp_assign(sync_mode, wbc->sync_mode)
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
++ tp_assign(encountered_congestion, wbc->encountered_congestion)
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
++ tp_assign(more_io, wbc->more_io)
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++ tp_assign(no_nrwrite_index_update, wbc->no_nrwrite_index_update)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
++ TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
++ "sync_mode %d writeback_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->ret,
++ __entry->pages_written, __entry->pages_skipped,
++ __entry->sync_mode,
++ (unsigned long) __entry->writeback_index)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
++ " more_io %d sync_mode %d writeback_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->ret,
++ __entry->pages_written, __entry->pages_skipped,
++ __entry->more_io, __entry->sync_mode,
++ (unsigned long) __entry->writeback_index)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
++ " more_io %d writeback_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->ret,
++ __entry->pages_written, __entry->pages_skipped,
++ __entry->more_io,
++ (unsigned long) __entry->writeback_index)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++ TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
++ " more_io %d no_nrwrite_index_update %d writeback_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->ret,
++ __entry->pages_written, __entry->pages_skipped,
++ __entry->more_io, __entry->no_nrwrite_index_update,
++ (unsigned long) __entry->writeback_index)
++#else
++ TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
++ " congestion %d"
++ " more_io %d no_nrwrite_index_update %d writeback_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->ret,
++ __entry->pages_written, __entry->pages_skipped,
++ __entry->encountered_congestion,
++ __entry->more_io, __entry->no_nrwrite_index_update,
++ (unsigned long) __entry->writeback_index)
++#endif
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++DECLARE_EVENT_CLASS(ext4__page_op,
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, page->mapping->host->i_sb->s_dev)
++ tp_assign(ino, page->mapping->host->i_ino)
++ tp_assign(index, page->index)
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->index)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,40))
++DEFINE_EVENT(ext4__page_op, ext4_writepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++)
++#endif
++
++DEFINE_EVENT(ext4__page_op, ext4_readpage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++)
++
++DEFINE_EVENT(ext4__page_op, ext4_releasepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
++ TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
++
++ TP_ARGS(page, offset, length),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++ __field( unsigned int, offset )
++ __field( unsigned int, length )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, page->mapping->host->i_sb->s_dev)
++ tp_assign(ino, page->mapping->host->i_ino)
++ tp_assign(index, page->index)
++ tp_assign(offset, offset)
++ tp_assign(length, length)
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->index,
++ __entry->offset, __entry->length)
++)
++
++DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
++ TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
++
++ TP_ARGS(page, offset, length)
++)
++
++DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
++ TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
++
++ TP_ARGS(page, offset, length)
++)
++
++#else
++
++TRACE_EVENT(ext4_invalidatepage,
++ TP_PROTO(struct page *page, unsigned long offset),
++
++ TP_ARGS(page, offset),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++ __field( unsigned long, offset )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, page->mapping->host->i_sb->s_dev)
++ tp_assign(ino, page->mapping->host->i_ino)
++ tp_assign(index, page->index)
++ tp_assign(offset, offset)
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->index, __entry->offset)
++)
++
++#endif
++
++#endif
++
++TRACE_EVENT(ext4_discard_blocks,
++ TP_PROTO(struct super_block *sb, unsigned long long blk,
++ unsigned long long count),
++
++ TP_ARGS(sb, blk, count),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( __u64, blk )
++ __field( __u64, count )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(blk, blk)
++ tp_assign(count, count)
++ ),
++
++ TP_printk("dev %d,%d blk %llu count %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->blk, __entry->count)
++)
++
++DECLARE_EVENT_CLASS(ext4__mb_new_pa,
++ TP_PROTO(struct ext4_allocation_context *ac,
++ struct ext4_prealloc_space *pa),
++
++ TP_ARGS(ac, pa),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, pa_pstart )
++ __field( __u64, pa_lstart )
++ __field( __u32, pa_len )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, ac->ac_sb->s_dev)
++ tp_assign(ino, ac->ac_inode->i_ino)
++ tp_assign(pa_pstart, pa->pa_pstart)
++ tp_assign(pa_lstart, pa->pa_lstart)
++ tp_assign(pa_len, pa->pa_len)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
++)
++
++DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa,
++
++ TP_PROTO(struct ext4_allocation_context *ac,
++ struct ext4_prealloc_space *pa),
++
++ TP_ARGS(ac, pa)
++)
++
++DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
++
++ TP_PROTO(struct ext4_allocation_context *ac,
++ struct ext4_prealloc_space *pa),
++
++ TP_ARGS(ac, pa)
++)
++
++TRACE_EVENT(ext4_mb_release_inode_pa,
++ TP_PROTO(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ struct super_block *sb,
++ struct inode *inode,
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ struct super_block *sb,
++ struct ext4_allocation_context *ac,
++#else
++ struct ext4_allocation_context *ac,
++#endif
++#endif
++ struct ext4_prealloc_space *pa,
++ unsigned long long block, unsigned int count),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,40))
++ TP_ARGS(pa, block, count),
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ TP_ARGS(sb, inode, pa, block, count),
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ TP_ARGS(sb, ac, pa, block, count),
++#else
++ TP_ARGS(ac, pa, block, count),
++#endif
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, block )
++ __field( __u32, count )
++
++ ),
++
++ TP_fast_assign(
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,40))
++ tp_assign(dev, pa->pa_inode->i_sb->s_dev)
++ tp_assign(ino, pa->pa_inode->i_ino)
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ tp_assign(dev, sb->s_dev)
++#else
++ tp_assign(dev, ac->ac_sb->s_dev)
++#endif
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ tp_assign(ino, inode->i_ino)
++#else
++ tp_assign(ino, (ac && ac->ac_inode) ? ac->ac_inode->i_ino : 0)
++#endif
++#endif
++ tp_assign(block, block)
++ tp_assign(count, count)
++ ),
++
++ TP_printk("dev %d,%d ino %lu block %llu count %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->block, __entry->count)
++)
++
++TRACE_EVENT(ext4_mb_release_group_pa,
++
++#if (LTTNG_KERNEL_RANGE(2,6,40, 3,3,0))
++ TP_PROTO(struct ext4_prealloc_space *pa),
++
++ TP_ARGS(pa),
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ TP_PROTO(struct super_block *sb, struct ext4_prealloc_space *pa),
++
++ TP_ARGS(sb, pa),
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ TP_PROTO(struct super_block *sb,
++ struct ext4_allocation_context *ac,
++ struct ext4_prealloc_space *pa),
++
++ TP_ARGS(sb, ac, pa),
++#else
++ TP_PROTO(struct ext4_allocation_context *ac,
++ struct ext4_prealloc_space *pa),
++
++ TP_ARGS(ac, pa),
++#endif
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
++ __field( ino_t, ino )
++#endif
++ __field( __u64, pa_pstart )
++ __field( __u32, pa_len )
++
++ ),
++
++ TP_fast_assign(
++#if (LTTNG_KERNEL_RANGE(2,6,40, 3,3,0))
++ tp_assign(dev, pa->pa_inode->i_sb->s_dev)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ tp_assign(dev, sb->s_dev)
++#else
++ tp_assign(dev, ac->ac_sb->s_dev)
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
++ tp_assign(ino, (ac && ac->ac_inode) ? ac->ac_inode->i_ino : 0)
++#endif
++ tp_assign(pa_pstart, pa->pa_pstart)
++ tp_assign(pa_len, pa->pa_len)
++ ),
++
++ TP_printk("dev %d,%d pstart %llu len %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->pa_pstart, __entry->pa_len)
++)
++
++TRACE_EVENT(ext4_discard_preallocations,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ ),
++
++ TP_printk("dev %d,%d ino %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino)
++)
++
++TRACE_EVENT(ext4_mb_discard_preallocations,
++ TP_PROTO(struct super_block *sb, int needed),
++
++ TP_ARGS(sb, needed),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, needed )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(needed, needed)
++ ),
++
++ TP_printk("dev %d,%d needed %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->needed)
++)
++
++TRACE_EVENT(ext4_request_blocks,
++ TP_PROTO(struct ext4_allocation_request *ar),
++
++ TP_ARGS(ar),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( unsigned int, len )
++ __field( __u32, logical )
++ __field( __u32, lleft )
++ __field( __u32, lright )
++ __field( __u64, goal )
++ __field( __u64, pleft )
++ __field( __u64, pright )
++ __field( unsigned int, flags )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, ar->inode->i_sb->s_dev)
++ tp_assign(ino, ar->inode->i_ino)
++ tp_assign(len, ar->len)
++ tp_assign(logical, ar->logical)
++ tp_assign(goal, ar->goal)
++ tp_assign(lleft, ar->lleft)
++ tp_assign(lright, ar->lright)
++ tp_assign(pleft, ar->pleft)
++ tp_assign(pright, ar->pright)
++ tp_assign(flags, ar->flags)
++ ),
++
++ TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
++ "lleft %u lright %u pleft %llu pright %llu ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->flags,
++ __entry->len, __entry->logical, __entry->goal,
++ __entry->lleft, __entry->lright, __entry->pleft,
++ __entry->pright)
++)
++
++TRACE_EVENT(ext4_allocate_blocks,
++ TP_PROTO(struct ext4_allocation_request *ar, unsigned long long block),
++
++ TP_ARGS(ar, block),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, block )
++ __field( unsigned int, len )
++ __field( __u32, logical )
++ __field( __u32, lleft )
++ __field( __u32, lright )
++ __field( __u64, goal )
++ __field( __u64, pleft )
++ __field( __u64, pright )
++ __field( unsigned int, flags )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, ar->inode->i_sb->s_dev)
++ tp_assign(ino, ar->inode->i_ino)
++ tp_assign(block, block)
++ tp_assign(len, ar->len)
++ tp_assign(logical, ar->logical)
++ tp_assign(goal, ar->goal)
++ tp_assign(lleft, ar->lleft)
++ tp_assign(lright, ar->lright)
++ tp_assign(pleft, ar->pleft)
++ tp_assign(pright, ar->pright)
++ tp_assign(flags, ar->flags)
++ ),
++
++ TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
++ "goal %llu lleft %u lright %u pleft %llu pright %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->flags,
++ __entry->len, __entry->block, __entry->logical,
++ __entry->goal, __entry->lleft, __entry->lright,
++ __entry->pleft, __entry->pright)
++)
++
++TRACE_EVENT(ext4_free_blocks,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++ TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
++ int flags),
++
++ TP_ARGS(inode, block, count, flags),
++#else
++ TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
++ int metadata),
++
++ TP_ARGS(inode, block, count, metadata),
++#endif
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, block )
++ __field( unsigned long, count )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++ __field( int, flags )
++ __field( TP_MODE_T, mode )
++#else
++ __field( int, metadata )
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(block, block)
++ tp_assign(count, count)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++ tp_assign(flags, flags)
++ tp_assign(mode, inode->i_mode)
++#else
++ tp_assign(metadata, metadata)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++ TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
++#else
++ TP_printk("dev %d,%d ino %lu block %llu count %lu metadata %d",
++#endif
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++ __entry->mode, __entry->block, __entry->count,
++ __entry->flags)
++#else
++ __entry->block, __entry->count, __entry->metadata)
++#endif
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++TRACE_EVENT(ext4_sync_file_enter,
++#else
++TRACE_EVENT(ext4_sync_file,
++#endif
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ TP_PROTO(struct file *file, int datasync),
++
++ TP_ARGS(file, datasync),
++#else
++ TP_PROTO(struct file *file, struct dentry *dentry, int datasync),
++
++ TP_ARGS(file, dentry, datasync),
++#endif
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ino_t, parent )
++ __field( int, datasync )
++ ),
++
++ TP_fast_assign(
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ tp_assign(dev, file->f_path.dentry->d_inode->i_sb->s_dev)
++ tp_assign(ino, file->f_path.dentry->d_inode->i_ino)
++ tp_assign(datasync, datasync)
++ tp_assign(parent, file->f_path.dentry->d_parent->d_inode->i_ino)
++#else
++ tp_assign(dev, dentry->d_inode->i_sb->s_dev)
++ tp_assign(ino, dentry->d_inode->i_ino)
++ tp_assign(datasync, datasync)
++ tp_assign(parent, dentry->d_parent->d_inode->i_ino)
++#endif
++ ),
++
++ TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->parent, __entry->datasync)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++TRACE_EVENT(ext4_sync_file_exit,
++ TP_PROTO(struct inode *inode, int ret),
++
++ TP_ARGS(inode, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->ret)
++)
++#endif
++
++TRACE_EVENT(ext4_sync_fs,
++ TP_PROTO(struct super_block *sb, int wait),
++
++ TP_ARGS(sb, wait),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, wait )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(wait, wait)
++ ),
++
++ TP_printk("dev %d,%d wait %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->wait)
++)
++
++TRACE_EVENT(ext4_alloc_da_blocks,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( unsigned int, data_blocks )
++ __field( unsigned int, meta_blocks )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(data_blocks, EXT4_I(inode)->i_reserved_data_blocks)
++ tp_assign(meta_blocks, EXT4_I(inode)->i_reserved_meta_blocks)
++ ),
++
++ TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->data_blocks, __entry->meta_blocks)
++)
++
++TRACE_EVENT(ext4_mballoc_alloc,
++ TP_PROTO(struct ext4_allocation_context *ac),
++
++ TP_ARGS(ac),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u32, orig_logical )
++ __field( int, orig_start )
++ __field( __u32, orig_group )
++ __field( int, orig_len )
++ __field( __u32, goal_logical )
++ __field( int, goal_start )
++ __field( __u32, goal_group )
++ __field( int, goal_len )
++ __field( __u32, result_logical )
++ __field( int, result_start )
++ __field( __u32, result_group )
++ __field( int, result_len )
++ __field( __u16, found )
++ __field( __u16, groups )
++ __field( __u16, buddy )
++ __field( __u16, flags )
++ __field( __u16, tail )
++ __field( __u8, cr )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, ac->ac_inode->i_sb->s_dev)
++ tp_assign(ino, ac->ac_inode->i_ino)
++ tp_assign(orig_logical, ac->ac_o_ex.fe_logical)
++ tp_assign(orig_start, ac->ac_o_ex.fe_start)
++ tp_assign(orig_group, ac->ac_o_ex.fe_group)
++ tp_assign(orig_len, ac->ac_o_ex.fe_len)
++ tp_assign(goal_logical, ac->ac_g_ex.fe_logical)
++ tp_assign(goal_start, ac->ac_g_ex.fe_start)
++ tp_assign(goal_group, ac->ac_g_ex.fe_group)
++ tp_assign(goal_len, ac->ac_g_ex.fe_len)
++ tp_assign(result_logical, ac->ac_f_ex.fe_logical)
++ tp_assign(result_start, ac->ac_f_ex.fe_start)
++ tp_assign(result_group, ac->ac_f_ex.fe_group)
++ tp_assign(result_len, ac->ac_f_ex.fe_len)
++ tp_assign(found, ac->ac_found)
++ tp_assign(flags, ac->ac_flags)
++ tp_assign(groups, ac->ac_groups_scanned)
++ tp_assign(buddy, ac->ac_buddy)
++ tp_assign(tail, ac->ac_tail)
++ tp_assign(cr, ac->ac_criteria)
++ ),
++
++ TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
++ "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
++ "tail %u broken %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->orig_group, __entry->orig_start,
++ __entry->orig_len, __entry->orig_logical,
++ __entry->goal_group, __entry->goal_start,
++ __entry->goal_len, __entry->goal_logical,
++ __entry->result_group, __entry->result_start,
++ __entry->result_len, __entry->result_logical,
++ __entry->found, __entry->groups, __entry->cr,
++ __entry->flags, __entry->tail,
++ __entry->buddy ? 1 << __entry->buddy : 0)
++)
++
++TRACE_EVENT(ext4_mballoc_prealloc,
++ TP_PROTO(struct ext4_allocation_context *ac),
++
++ TP_ARGS(ac),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u32, orig_logical )
++ __field( int, orig_start )
++ __field( __u32, orig_group )
++ __field( int, orig_len )
++ __field( __u32, result_logical )
++ __field( int, result_start )
++ __field( __u32, result_group )
++ __field( int, result_len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, ac->ac_inode->i_sb->s_dev)
++ tp_assign(ino, ac->ac_inode->i_ino)
++ tp_assign(orig_logical, ac->ac_o_ex.fe_logical)
++ tp_assign(orig_start, ac->ac_o_ex.fe_start)
++ tp_assign(orig_group, ac->ac_o_ex.fe_group)
++ tp_assign(orig_len, ac->ac_o_ex.fe_len)
++ tp_assign(result_logical, ac->ac_b_ex.fe_logical)
++ tp_assign(result_start, ac->ac_b_ex.fe_start)
++ tp_assign(result_group, ac->ac_b_ex.fe_group)
++ tp_assign(result_len, ac->ac_b_ex.fe_len)
++ ),
++
++ TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->orig_group, __entry->orig_start,
++ __entry->orig_len, __entry->orig_logical,
++ __entry->result_group, __entry->result_start,
++ __entry->result_len, __entry->result_logical)
++)
++
++DECLARE_EVENT_CLASS(ext4__mballoc,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ TP_PROTO(struct super_block *sb,
++ struct inode *inode,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, inode, group, start, len),
++#else
++ TP_PROTO(struct ext4_allocation_context *ac),
++
++ TP_ARGS(ac),
++#endif
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
++ __field( __u32, result_logical )
++#endif
++ __field( int, result_start )
++ __field( __u32, result_group )
++ __field( int, result_len )
++ ),
++
++ TP_fast_assign(
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ tp_assign(dev, sb->s_dev)
++ tp_assign(ino, inode ? inode->i_ino : 0)
++ tp_assign(result_start, start)
++ tp_assign(result_group, group)
++ tp_assign(result_len, len)
++#else
++ tp_assign(dev, ac->ac_sb->s_dev)
++ tp_assign(ino, ac->ac_inode ? ac->ac_inode->i_ino : 0)
++ tp_assign(result_logical, ac->ac_b_ex.fe_logical)
++ tp_assign(result_start, ac->ac_b_ex.fe_start)
++ tp_assign(result_group, ac->ac_b_ex.fe_group)
++ tp_assign(result_len, ac->ac_b_ex.fe_len)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ TP_printk("dev %d,%d inode %lu extent %u/%d/%d ",
++#else
++ TP_printk("dev %d,%d inode %lu extent %u/%d/%u@%u ",
++#endif
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->result_group, __entry->result_start,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ __entry->result_len
++#else
++ __entry->result_len, __entry->result_logical
++#endif
++ )
++)
++
++DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ TP_PROTO(struct super_block *sb,
++ struct inode *inode,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, inode, group, start, len)
++#else
++ TP_PROTO(struct ext4_allocation_context *ac),
++
++ TP_ARGS(ac)
++#endif
++)
++
++DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++ TP_PROTO(struct super_block *sb,
++ struct inode *inode,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, inode, group, start, len)
++#else
++ TP_PROTO(struct ext4_allocation_context *ac),
++
++ TP_ARGS(ac)
++#endif
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++TRACE_EVENT(ext4_forget,
++ TP_PROTO(struct inode *inode, int is_metadata, __u64 block),
++
++ TP_ARGS(inode, is_metadata, block),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, block )
++ __field( int, is_metadata )
++ __field( TP_MODE_T, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(block, block)
++ tp_assign(is_metadata, is_metadata)
++ tp_assign(mode, inode->i_mode)
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->is_metadata, __entry->block)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++TRACE_EVENT(ext4_da_update_reserve_space,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++ TP_PROTO(struct inode *inode, int used_blocks, int quota_claim),
++
++ TP_ARGS(inode, used_blocks, quota_claim),
++#else
++ TP_PROTO(struct inode *inode, int used_blocks),
++
++ TP_ARGS(inode, used_blocks),
++#endif
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, i_blocks )
++ __field( int, used_blocks )
++ __field( int, reserved_data_blocks )
++ __field( int, reserved_meta_blocks )
++ __field( int, allocated_meta_blocks )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++ __field( int, quota_claim )
++#endif
++ __field( TP_MODE_T, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(i_blocks, inode->i_blocks)
++ tp_assign(used_blocks, used_blocks)
++ tp_assign(reserved_data_blocks,
++ EXT4_I(inode)->i_reserved_data_blocks)
++ tp_assign(reserved_meta_blocks,
++ EXT4_I(inode)->i_reserved_meta_blocks)
++ tp_assign(allocated_meta_blocks,
++ EXT4_I(inode)->i_allocated_meta_blocks)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++ tp_assign(quota_claim, quota_claim)
++#endif
++ tp_assign(mode, inode->i_mode)
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
++ "reserved_data_blocks %d reserved_meta_blocks %d "
++ "allocated_meta_blocks %d quota_claim %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->i_blocks,
++ __entry->used_blocks, __entry->reserved_data_blocks,
++ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks,
++ __entry->quota_claim)
++#else
++ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
++ "reserved_data_blocks %d reserved_meta_blocks %d "
++ "allocated_meta_blocks %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->i_blocks,
++ __entry->used_blocks, __entry->reserved_data_blocks,
++ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
++#endif
++)
++
++TRACE_EVENT(ext4_da_reserve_space,
++ TP_PROTO(struct inode *inode, int md_needed),
++
++ TP_ARGS(inode, md_needed),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, i_blocks )
++ __field( int, md_needed )
++ __field( int, reserved_data_blocks )
++ __field( int, reserved_meta_blocks )
++ __field( TP_MODE_T, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(i_blocks, inode->i_blocks)
++ tp_assign(md_needed, md_needed)
++ tp_assign(reserved_data_blocks,
++ EXT4_I(inode)->i_reserved_data_blocks)
++ tp_assign(reserved_meta_blocks,
++ EXT4_I(inode)->i_reserved_meta_blocks)
++ tp_assign(mode, inode->i_mode)
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d "
++ "reserved_data_blocks %d reserved_meta_blocks %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->i_blocks,
++ __entry->md_needed, __entry->reserved_data_blocks,
++ __entry->reserved_meta_blocks)
++)
++
++TRACE_EVENT(ext4_da_release_space,
++ TP_PROTO(struct inode *inode, int freed_blocks),
++
++ TP_ARGS(inode, freed_blocks),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, i_blocks )
++ __field( int, freed_blocks )
++ __field( int, reserved_data_blocks )
++ __field( int, reserved_meta_blocks )
++ __field( int, allocated_meta_blocks )
++ __field( TP_MODE_T, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(i_blocks, inode->i_blocks)
++ tp_assign(freed_blocks, freed_blocks)
++ tp_assign(reserved_data_blocks,
++ EXT4_I(inode)->i_reserved_data_blocks)
++ tp_assign(reserved_meta_blocks,
++ EXT4_I(inode)->i_reserved_meta_blocks)
++ tp_assign(allocated_meta_blocks,
++ EXT4_I(inode)->i_allocated_meta_blocks)
++ tp_assign(mode, inode->i_mode)
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d "
++ "reserved_data_blocks %d reserved_meta_blocks %d "
++ "allocated_meta_blocks %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->i_blocks,
++ __entry->freed_blocks, __entry->reserved_data_blocks,
++ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++DECLARE_EVENT_CLASS(ext4__bitmap_load,
++ TP_PROTO(struct super_block *sb, unsigned long group),
++
++ TP_ARGS(sb, group),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( __u32, group )
++
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(group, group)
++ ),
++
++ TP_printk("dev %d,%d group %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->group)
++)
++
++DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load,
++
++ TP_PROTO(struct super_block *sb, unsigned long group),
++
++ TP_ARGS(sb, group)
++)
++
++DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
++
++ TP_PROTO(struct super_block *sb, unsigned long group),
++
++ TP_ARGS(sb, group)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load,
++
++ TP_PROTO(struct super_block *sb, unsigned long group),
++
++ TP_ARGS(sb, group)
++)
++
++DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
++
++ TP_PROTO(struct super_block *sb, unsigned long group),
++
++ TP_ARGS(sb, group)
++)
++
++TRACE_EVENT(ext4_direct_IO_enter,
++ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
++
++ TP_ARGS(inode, offset, len, rw),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned long, len )
++ __field( int, rw )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pos, offset)
++ tp_assign(len, len)
++ tp_assign(rw, rw)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pos, __entry->len, __entry->rw)
++)
++
++TRACE_EVENT(ext4_direct_IO_exit,
++ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
++ int rw, int ret),
++
++ TP_ARGS(inode, offset, len, rw, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned long, len )
++ __field( int, rw )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pos, offset)
++ tp_assign(len, len)
++ tp_assign(rw, rw)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pos, __entry->len,
++ __entry->rw, __entry->ret)
++)
++
++TRACE_EVENT(ext4_fallocate_enter,
++ TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
++
++ TP_ARGS(inode, offset, len, mode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( loff_t, len )
++ __field( int, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pos, offset)
++ tp_assign(len, len)
++ tp_assign(mode, mode)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->pos,
++ __entry->len, __entry->mode)
++)
++
++TRACE_EVENT(ext4_fallocate_exit,
++ TP_PROTO(struct inode *inode, loff_t offset,
++ unsigned int max_blocks, int ret),
++
++ TP_ARGS(inode, offset, max_blocks, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned int, blocks )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pos, offset)
++ tp_assign(blocks, max_blocks)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pos, __entry->blocks,
++ __entry->ret)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++TRACE_EVENT(ext4_punch_hole,
++ TP_PROTO(struct inode *inode, loff_t offset, loff_t len),
++
++ TP_ARGS(inode, offset, len),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, offset )
++ __field( loff_t, len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(offset, offset)
++ tp_assign(len, len)
++ ),
++
++ TP_printk("dev %d,%d ino %lu offset %lld len %lld",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->offset, __entry->len)
++)
++
++#endif
++
++TRACE_EVENT(ext4_unlink_enter,
++ TP_PROTO(struct inode *parent, struct dentry *dentry),
++
++ TP_ARGS(parent, dentry),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ino_t, parent )
++ __field( loff_t, size )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, dentry->d_inode->i_sb->s_dev)
++ tp_assign(ino, dentry->d_inode->i_ino)
++ tp_assign(parent, parent->i_ino)
++ tp_assign(size, dentry->d_inode->i_size)
++ ),
++
++ TP_printk("dev %d,%d ino %lu size %lld parent %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->size,
++ (unsigned long) __entry->parent)
++)
++
++TRACE_EVENT(ext4_unlink_exit,
++ TP_PROTO(struct dentry *dentry, int ret),
++
++ TP_ARGS(dentry, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, dentry->d_inode->i_sb->s_dev)
++ tp_assign(ino, dentry->d_inode->i_ino)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->ret)
++)
++
++DECLARE_EVENT_CLASS(ext4__truncate,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, blocks )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(blocks, inode->i_blocks)
++ ),
++
++ TP_printk("dev %d,%d ino %lu blocks %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->blocks)
++)
++
++DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++)
++
++DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++/* 'ux' is the uninitialized extent. */
++TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
++ struct ext4_extent *ux),
++
++ TP_ARGS(inode, map, ux),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, m_lblk )
++ __field( unsigned, m_len )
++ __field( ext4_lblk_t, u_lblk )
++ __field( unsigned, u_len )
++ __field( ext4_fsblk_t, u_pblk )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(m_lblk, map->m_lblk)
++ tp_assign(m_len, map->m_len)
++ tp_assign(u_lblk, le32_to_cpu(ux->ee_block))
++ tp_assign(u_len, ext4_ext_get_actual_len(ux))
++ tp_assign(u_pblk, ext4_ext_pblock(ux))
++ ),
++
++ TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u u_lblk %u u_len %u "
++ "u_pblk %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->m_lblk, __entry->m_len,
++ __entry->u_lblk, __entry->u_len, __entry->u_pblk)
++)
++
++/*
++ * 'ux' is the uninitialized extent.
++ * 'ix' is the initialized extent to which blocks are transferred.
++ */
++TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
++ struct ext4_extent *ux, struct ext4_extent *ix),
++
++ TP_ARGS(inode, map, ux, ix),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, m_lblk )
++ __field( unsigned, m_len )
++ __field( ext4_lblk_t, u_lblk )
++ __field( unsigned, u_len )
++ __field( ext4_fsblk_t, u_pblk )
++ __field( ext4_lblk_t, i_lblk )
++ __field( unsigned, i_len )
++ __field( ext4_fsblk_t, i_pblk )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(m_lblk, map->m_lblk)
++ tp_assign(m_len, map->m_len)
++ tp_assign(u_lblk, le32_to_cpu(ux->ee_block))
++ tp_assign(u_len, ext4_ext_get_actual_len(ux))
++ tp_assign(u_pblk, ext4_ext_pblock(ux))
++ tp_assign(i_lblk, le32_to_cpu(ix->ee_block))
++ tp_assign(i_len, ext4_ext_get_actual_len(ix))
++ tp_assign(i_pblk, ext4_ext_pblock(ix))
++ ),
++
++ TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u "
++ "u_lblk %u u_len %u u_pblk %llu "
++ "i_lblk %u i_len %u i_pblk %llu ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->m_lblk, __entry->m_len,
++ __entry->u_lblk, __entry->u_len, __entry->u_pblk,
++ __entry->i_lblk, __entry->i_len, __entry->i_pblk)
++)
++#endif
++
++DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ unsigned int len, unsigned int flags),
++
++ TP_ARGS(inode, lblk, len, flags),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned int, len )
++ __field( unsigned int, flags )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, lblk)
++ tp_assign(len, len)
++ tp_assign(flags, flags)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->len, __entry->flags)
++)
++
++DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ unsigned len, unsigned flags),
++
++ TP_ARGS(inode, lblk, len, flags)
++)
++
++DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ unsigned len, unsigned flags),
++
++ TP_ARGS(inode, lblk, len, flags)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
++ TP_PROTO(struct inode *inode, unsigned flags, struct ext4_map_blocks *map,
++ int ret),
++
++ TP_ARGS(inode, flags, map, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( unsigned int, flags )
++ __field( ext4_fsblk_t, pblk )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned int, len )
++ __field( unsigned int, mflags )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(flags, flags)
++ tp_assign(pblk, map->m_pblk)
++ tp_assign(lblk, map->m_lblk)
++ tp_assign(len, map->m_len)
++ tp_assign(mflags, map->m_flags)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu flags %s lblk %u pblk %llu len %u "
++ "mflags %s ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ show_map_flags(__entry->flags), __entry->lblk, __entry->pblk,
++ __entry->len, show_mflags(__entry->mflags), __entry->ret)
++)
++
++DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
++ TP_PROTO(struct inode *inode, unsigned flags,
++ struct ext4_map_blocks *map, int ret),
++
++ TP_ARGS(inode, flags, map, ret)
++)
++
++DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
++ TP_PROTO(struct inode *inode, unsigned flags,
++ struct ext4_map_blocks *map, int ret),
++
++ TP_ARGS(inode, flags, map, ret)
++)
++
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
++
++DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
++
++ TP_ARGS(inode, map, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, pblk )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned int, len )
++ __field( unsigned int, flags )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pblk, map->m_pblk)
++ tp_assign(lblk, map->m_lblk)
++ tp_assign(len, map->m_len)
++ tp_assign(flags, map->m_flags)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u flags %x ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->pblk,
++ __entry->len, __entry->flags, __entry->ret)
++)
++
++DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
++
++ TP_ARGS(inode, map, ret)
++)
++
++DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
++
++ TP_ARGS(inode, map, ret)
++)
++
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
++
++DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ ext4_fsblk_t pblk, unsigned int len, int ret),
++
++ TP_ARGS(inode, lblk, pblk, len, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, pblk )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned int, len )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pblk, pblk)
++ tp_assign(lblk, lblk)
++ tp_assign(len, len)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->pblk,
++ __entry->len, __entry->ret)
++)
++
++DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ ext4_fsblk_t pblk, unsigned len, int ret),
++
++ TP_ARGS(inode, lblk, pblk, len, ret)
++)
++
++DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ ext4_fsblk_t pblk, unsigned len, int ret),
++
++ TP_ARGS(inode, lblk, pblk, len, ret)
++)
++
++#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
++
++TRACE_EVENT(ext4_ext_load_extent,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk),
++
++ TP_ARGS(inode, lblk, pblk),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, pblk )
++ __field( ext4_lblk_t, lblk )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pblk, pblk)
++ tp_assign(lblk, lblk)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->pblk)
++)
++
++TRACE_EVENT(ext4_load_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ ),
++
++ TP_printk("dev %d,%d ino %ld",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++TRACE_EVENT(ext4_journal_start,
++ TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks,
++ unsigned long IP),
++
++ TP_ARGS(sb, blocks, rsv_blocks, IP),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field(unsigned long, ip )
++ __field( int, blocks )
++ __field( int, rsv_blocks )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(ip, IP)
++ tp_assign(blocks, blocks)
++ tp_assign(rsv_blocks, rsv_blocks)
++ ),
++
++ TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pF",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->blocks, __entry->rsv_blocks, (void *)__entry->ip)
++)
++
++TRACE_EVENT(ext4_journal_start_reserved,
++ TP_PROTO(struct super_block *sb, int blocks, unsigned long IP),
++
++ TP_ARGS(sb, blocks, IP),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field(unsigned long, ip )
++ __field( int, blocks )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(ip, IP)
++ tp_assign(blocks, blocks)
++ ),
++
++ TP_printk("dev %d,%d blocks, %d caller %pF",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->blocks, (void *)__entry->ip)
++)
++
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
++
++TRACE_EVENT(ext4_journal_start,
++ TP_PROTO(struct super_block *sb, int nblocks, unsigned long IP),
++
++ TP_ARGS(sb, nblocks, IP),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field(unsigned long, ip )
++ __field( int, nblocks )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(ip, IP)
++ tp_assign(nblocks, nblocks)
++ ),
++
++ TP_printk("dev %d,%d nblocks %d caller %pF",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->nblocks, (void *)__entry->ip)
++)
++
++DECLARE_EVENT_CLASS(ext4__trim,
++ TP_PROTO(struct super_block *sb,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, group, start, len),
++
++ TP_STRUCT__entry(
++ __field( int, dev_major )
++ __field( int, dev_minor )
++ __field( __u32, group )
++ __field( int, start )
++ __field( int, len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev_major, MAJOR(sb->s_dev))
++ tp_assign(dev_minor, MINOR(sb->s_dev))
++ tp_assign(group, group)
++ tp_assign(start, start)
++ tp_assign(len, len)
++ ),
++
++ TP_printk("dev %d,%d group %u, start %d, len %d",
++ __entry->dev_major, __entry->dev_minor,
++ __entry->group, __entry->start, __entry->len)
++)
++
++DEFINE_EVENT(ext4__trim, ext4_trim_extent,
++
++ TP_PROTO(struct super_block *sb,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, group, start, len)
++)
++
++DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
++
++ TP_PROTO(struct super_block *sb,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, group, start, len)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++
++TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags,
++ unsigned int allocated, ext4_fsblk_t newblock),
++
++ TP_ARGS(inode, map, flags, allocated, newblock),
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
++ unsigned int allocated, ext4_fsblk_t newblock),
++
++ TP_ARGS(inode, map, allocated, newblock),
++#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, flags )
++ __field( ext4_lblk_t, lblk )
++ __field( ext4_fsblk_t, pblk )
++ __field( unsigned int, len )
++ __field( unsigned int, allocated )
++ __field( ext4_fsblk_t, newblk )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
++ tp_assign(flags, flags)
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
++ tp_assign(flags, map->m_flags)
++#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) */
++ tp_assign(lblk, map->m_lblk)
++ tp_assign(pblk, map->m_pblk)
++ tp_assign(len, map->m_len)
++ tp_assign(allocated, allocated)
++ tp_assign(newblk, newblock)
++ ),
++
++ TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d"
++ "allocated %d newblock %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
++ __entry->len, __entry->flags,
++ (unsigned int) __entry->allocated,
++ (unsigned long long) __entry->newblk)
++)
++
++TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
++ TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret),
++
++ TP_ARGS(sb, map, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( unsigned int, flags )
++ __field( ext4_lblk_t, lblk )
++ __field( ext4_fsblk_t, pblk )
++ __field( unsigned int, len )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(flags, map->m_flags)
++ tp_assign(lblk, map->m_lblk)
++ tp_assign(pblk, map->m_pblk)
++ tp_assign(len, map->m_len)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->lblk, (unsigned long long) __entry->pblk,
++ __entry->len, __entry->flags, __entry->ret)
++)
++
++TRACE_EVENT(ext4_ext_put_in_cache,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
++ ext4_fsblk_t start),
++
++ TP_ARGS(inode, lblk, len, start),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned int, len )
++ __field( ext4_fsblk_t, start )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, lblk)
++ tp_assign(len, len)
++ tp_assign(start, start)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->lblk,
++ __entry->len,
++ (unsigned long long) __entry->start)
++)
++
++TRACE_EVENT(ext4_ext_in_cache,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
++
++ TP_ARGS(inode, lblk, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, lblk)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->lblk,
++ __entry->ret)
++
++)
++
++TRACE_EVENT(ext4_find_delalloc_range,
++ TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
++ int reverse, int found, ext4_lblk_t found_blk),
++
++ TP_ARGS(inode, from, to, reverse, found, found_blk),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, from )
++ __field( ext4_lblk_t, to )
++ __field( int, reverse )
++ __field( int, found )
++ __field( ext4_lblk_t, found_blk )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(from, from)
++ tp_assign(to, to)
++ tp_assign(reverse, reverse)
++ tp_assign(found, found)
++ tp_assign(found_blk, found_blk)
++ ),
++
++ TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
++ "(blk = %u)",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->from, (unsigned) __entry->to,
++ __entry->reverse, __entry->found,
++ (unsigned) __entry->found_blk)
++)
++
++TRACE_EVENT(ext4_get_reserved_cluster_alloc,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
++
++ TP_ARGS(inode, lblk, len),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned int, len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, lblk)
++ tp_assign(len, len)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u len %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->lblk,
++ __entry->len)
++)
++
++TRACE_EVENT(ext4_ext_show_extent,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
++ unsigned short len),
++
++ TP_ARGS(inode, lblk, pblk, len),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, pblk )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned short, len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pblk, pblk)
++ tp_assign(lblk, lblk)
++ tp_assign(len, len)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->lblk,
++ (unsigned long long) __entry->pblk,
++ (unsigned short) __entry->len)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++TRACE_EVENT(ext4_remove_blocks,
++ TP_PROTO(struct inode *inode, struct ext4_extent *ex,
++ ext4_lblk_t from, ext4_fsblk_t to,
++ long long partial_cluster),
++
++ TP_ARGS(inode, ex, from, to, partial_cluster),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, from )
++ __field( ext4_lblk_t, to )
++ __field( long long, partial )
++ __field( ext4_fsblk_t, ee_pblk )
++ __field( ext4_lblk_t, ee_lblk )
++ __field( unsigned short, ee_len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(from, from)
++ tp_assign(to, to)
++ tp_assign(partial, partial_cluster)
++ tp_assign(ee_pblk, ext4_ext_pblock(ex))
++ tp_assign(ee_lblk, le32_to_cpu(ex->ee_block))
++ tp_assign(ee_len, ext4_ext_get_actual_len(ex))
++ ),
++
++ TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
++ "from %u to %u partial_cluster %lld",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->ee_lblk,
++ (unsigned long long) __entry->ee_pblk,
++ (unsigned short) __entry->ee_len,
++ (unsigned) __entry->from,
++ (unsigned) __entry->to,
++ (long long) __entry->partial)
++)
++
++#else
++
++TRACE_EVENT(ext4_remove_blocks,
++ TP_PROTO(struct inode *inode, struct ext4_extent *ex,
++ ext4_lblk_t from, ext4_fsblk_t to,
++ ext4_fsblk_t partial_cluster),
++
++ TP_ARGS(inode, ex, from, to, partial_cluster),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, from )
++ __field( ext4_lblk_t, to )
++ __field( ext4_fsblk_t, partial )
++ __field( ext4_fsblk_t, ee_pblk )
++ __field( ext4_lblk_t, ee_lblk )
++ __field( unsigned short, ee_len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(from, from)
++ tp_assign(to, to)
++ tp_assign(partial, partial_cluster)
++ tp_assign(ee_pblk, ext4_ext_pblock(ex))
++ tp_assign(ee_lblk, cpu_to_le32(ex->ee_block))
++ tp_assign(ee_len, ext4_ext_get_actual_len(ex))
++ ),
++
++ TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
++ "from %u to %u partial_cluster %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->ee_lblk,
++ (unsigned long long) __entry->ee_pblk,
++ (unsigned short) __entry->ee_len,
++ (unsigned) __entry->from,
++ (unsigned) __entry->to,
++ (unsigned) __entry->partial)
++)
++
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++TRACE_EVENT(ext4_ext_rm_leaf,
++ TP_PROTO(struct inode *inode, ext4_lblk_t start,
++ struct ext4_extent *ex,
++ long long partial_cluster),
++
++ TP_ARGS(inode, start, ex, partial_cluster),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( long long, partial )
++ __field( ext4_lblk_t, start )
++ __field( ext4_lblk_t, ee_lblk )
++ __field( ext4_fsblk_t, ee_pblk )
++ __field( short, ee_len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(partial, partial_cluster)
++ tp_assign(start, start)
++ tp_assign(ee_lblk, le32_to_cpu(ex->ee_block))
++ tp_assign(ee_pblk, ext4_ext_pblock(ex))
++ tp_assign(ee_len, ext4_ext_get_actual_len(ex))
++ ),
++
++ TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
++ "partial_cluster %lld",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->start,
++ (unsigned) __entry->ee_lblk,
++ (unsigned long long) __entry->ee_pblk,
++ (unsigned short) __entry->ee_len,
++ (long long) __entry->partial)
++)
++
++#else
++
++TRACE_EVENT(ext4_ext_rm_leaf,
++ TP_PROTO(struct inode *inode, ext4_lblk_t start,
++ struct ext4_extent *ex, ext4_fsblk_t partial_cluster),
++
++ TP_ARGS(inode, start, ex, partial_cluster),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, partial )
++ __field( ext4_lblk_t, start )
++ __field( ext4_lblk_t, ee_lblk )
++ __field( ext4_fsblk_t, ee_pblk )
++ __field( short, ee_len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(partial, partial_cluster)
++ tp_assign(start, start)
++ tp_assign(ee_lblk, le32_to_cpu(ex->ee_block))
++ tp_assign(ee_pblk, ext4_ext_pblock(ex))
++ tp_assign(ee_len, ext4_ext_get_actual_len(ex))
++ ),
++
++ TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
++ "partial_cluster %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->start,
++ (unsigned) __entry->ee_lblk,
++ (unsigned long long) __entry->ee_pblk,
++ (unsigned short) __entry->ee_len,
++ (unsigned) __entry->partial)
++)
++
++#endif
++
++TRACE_EVENT(ext4_ext_rm_idx,
++ TP_PROTO(struct inode *inode, ext4_fsblk_t pblk),
++
++ TP_ARGS(inode, pblk),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, pblk )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(pblk, pblk)
++ ),
++
++ TP_printk("dev %d,%d ino %lu index_pblk %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long) __entry->pblk)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++TRACE_EVENT(ext4_ext_remove_space,
++ TP_PROTO(struct inode *inode, ext4_lblk_t start,
++ ext4_lblk_t end, int depth),
++
++ TP_ARGS(inode, start, end, depth),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, start )
++ __field( ext4_lblk_t, end )
++ __field( int, depth )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(start, start)
++ tp_assign(end, end)
++ tp_assign(depth, depth)
++ ),
++
++ TP_printk("dev %d,%d ino %lu since %u end %u depth %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->start,
++ (unsigned) __entry->end,
++ __entry->depth)
++)
++
++#else
++
++TRACE_EVENT(ext4_ext_remove_space,
++ TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
++
++ TP_ARGS(inode, start, depth),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, start )
++ __field( int, depth )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(start, start)
++ tp_assign(depth, depth)
++ ),
++
++ TP_printk("dev %d,%d ino %lu since %u depth %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->start,
++ __entry->depth)
++)
++
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++TRACE_EVENT(ext4_ext_remove_space_done,
++ TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end,
++ int depth, long long partial, __le16 eh_entries),
++
++ TP_ARGS(inode, start, end, depth, partial, eh_entries),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, start )
++ __field( ext4_lblk_t, end )
++ __field( int, depth )
++ __field( long long, partial )
++ __field( unsigned short, eh_entries )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(start, start)
++ tp_assign(end, end)
++ tp_assign(depth, depth)
++ tp_assign(partial, partial)
++ tp_assign(eh_entries, le16_to_cpu(eh_entries))
++ ),
++
++ TP_printk("dev %d,%d ino %lu since %u end %u depth %d partial %lld "
++ "remaining_entries %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->start,
++ (unsigned) __entry->end,
++ __entry->depth,
++ (long long) __entry->partial,
++ (unsigned short) __entry->eh_entries)
++)
++
++#else
++
++TRACE_EVENT(ext4_ext_remove_space_done,
++ TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
++ ext4_lblk_t partial, unsigned short eh_entries),
++
++ TP_ARGS(inode, start, depth, partial, eh_entries),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, start )
++ __field( int, depth )
++ __field( ext4_lblk_t, partial )
++ __field( unsigned short, eh_entries )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(start, start)
++ tp_assign(depth, depth)
++ tp_assign(partial, partial)
++ tp_assign(eh_entries, eh_entries)
++ ),
++
++ TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
++ "remaining_entries %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->start,
++ __entry->depth,
++ (unsigned) __entry->partial,
++ (unsigned short) __entry->eh_entries)
++)
++
++#endif
++
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
++
++DECLARE_EVENT_CLASS(ext4__es_extent,
++ TP_PROTO(struct inode *inode, struct extent_status *es),
++
++ TP_ARGS(inode, es),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( ext4_lblk_t, len )
++ __field( ext4_fsblk_t, pblk )
++ __field( char, status )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, es->es_lblk)
++ tp_assign(len, es->es_len)
++ tp_assign(pblk, ext4_es_pblock(es))
++ tp_assign(status, ext4_es_status(es))
++ ),
++
++ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->len,
++ __entry->pblk, show_extent_status(__entry->status))
++)
++
++DEFINE_EVENT(ext4__es_extent, ext4_es_insert_extent,
++ TP_PROTO(struct inode *inode, struct extent_status *es),
++
++ TP_ARGS(inode, es)
++)
++
++DEFINE_EVENT(ext4__es_extent, ext4_es_cache_extent,
++ TP_PROTO(struct inode *inode, struct extent_status *es),
++
++ TP_ARGS(inode, es)
++)
++
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++
++TRACE_EVENT(ext4_es_insert_extent,
++ TP_PROTO(struct inode *inode, struct extent_status *es),
++
++ TP_ARGS(inode, es),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( ext4_lblk_t, len )
++ __field( ext4_fsblk_t, pblk )
++ __field( char, status )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, es->es_lblk)
++ tp_assign(len, es->es_len)
++ tp_assign(pblk, ext4_es_pblock(es))
++ tp_assign(status, ext4_es_status(es) >> 60)
++ ),
++
++ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->len,
++ __entry->pblk, show_extent_status(__entry->status))
++)
++
++TRACE_EVENT(ext4_es_remove_extent,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len),
++
++ TP_ARGS(inode, lblk, len),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, lblk )
++ __field( loff_t, len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, lblk)
++ tp_assign(len, len)
++ ),
++
++ TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->len)
++)
++
++TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
++
++ TP_ARGS(inode, lblk),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, lblk)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->lblk)
++)
++
++TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
++ TP_PROTO(struct inode *inode, struct extent_status *es),
++
++ TP_ARGS(inode, es),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( ext4_lblk_t, len )
++ __field( ext4_fsblk_t, pblk )
++ __field( char, status )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, es->es_lblk)
++ tp_assign(len, es->es_len)
++ tp_assign(pblk, ext4_es_pblock(es))
++ tp_assign(status, ext4_es_status(es) >> 60)
++ ),
++
++ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->len,
++ __entry->pblk, show_extent_status(__entry->status))
++)
++
++TRACE_EVENT(ext4_es_lookup_extent_enter,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
++
++ TP_ARGS(inode, lblk),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, lblk)
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->lblk)
++)
++
++TRACE_EVENT(ext4_es_lookup_extent_exit,
++ TP_PROTO(struct inode *inode, struct extent_status *es,
++ int found),
++
++ TP_ARGS(inode, es, found),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( ext4_lblk_t, len )
++ __field( ext4_fsblk_t, pblk )
++ __field( char, status )
++ __field( int, found )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(lblk, es->es_lblk)
++ tp_assign(len, es->es_len)
++ tp_assign(pblk, ext4_es_pblock(es))
++ tp_assign(status, ext4_es_status(es) >> 60)
++ tp_assign(found, found)
++ ),
++
++ TP_printk("dev %d,%d ino %lu found %d [%u/%u) %llu %s",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->found,
++ __entry->lblk, __entry->len,
++ __entry->found ? __entry->pblk : 0,
++ show_extent_status(__entry->found ? __entry->status : 0))
++)
++
++TRACE_EVENT(ext4_es_shrink_enter,
++ TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
++
++ TP_ARGS(sb, nr_to_scan, cache_cnt),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, nr_to_scan )
++ __field( int, cache_cnt )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(nr_to_scan, nr_to_scan)
++ tp_assign(cache_cnt, cache_cnt)
++ ),
++
++ TP_printk("dev %d,%d nr_to_scan %d cache_cnt %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->nr_to_scan, __entry->cache_cnt)
++)
++
++TRACE_EVENT(ext4_es_shrink_exit,
++ TP_PROTO(struct super_block *sb, int shrunk_nr, int cache_cnt),
++
++ TP_ARGS(sb, shrunk_nr, cache_cnt),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, shrunk_nr )
++ __field( int, cache_cnt )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, sb->s_dev)
++ tp_assign(shrunk_nr, shrunk_nr)
++ tp_assign(cache_cnt, cache_cnt)
++ ),
++
++ TP_printk("dev %d,%d shrunk_nr %d cache_cnt %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->shrunk_nr, __entry->cache_cnt)
++)
++
++#endif
++
++#endif /* _TRACE_EXT4_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/gpio.h
+@@ -0,0 +1,56 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM gpio
++
++#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_GPIO_H
++
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(gpio_direction,
++
++ TP_PROTO(unsigned gpio, int in, int err),
++
++ TP_ARGS(gpio, in, err),
++
++ TP_STRUCT__entry(
++ __field(unsigned, gpio)
++ __field(int, in)
++ __field(int, err)
++ ),
++
++ TP_fast_assign(
++ tp_assign(gpio, gpio)
++ tp_assign(in, in)
++ tp_assign(err, err)
++ ),
++
++ TP_printk("%u %3s (%d)", __entry->gpio,
++ __entry->in ? "in" : "out", __entry->err)
++)
++
++TRACE_EVENT(gpio_value,
++
++ TP_PROTO(unsigned gpio, int get, int value),
++
++ TP_ARGS(gpio, get, value),
++
++ TP_STRUCT__entry(
++ __field(unsigned, gpio)
++ __field(int, get)
++ __field(int, value)
++ ),
++
++ TP_fast_assign(
++ tp_assign(gpio, gpio)
++ tp_assign(get, get)
++ tp_assign(value, value)
++ ),
++
++ TP_printk("%u %3s %d", __entry->gpio,
++ __entry->get ? "get" : "set", __entry->value)
++)
++
++#endif /* if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/irq.h
+@@ -0,0 +1,220 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM irq
++
++#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_IRQ_H
++
++#include <linux/tracepoint.h>
++
++#ifndef _TRACE_IRQ_DEF_
++#define _TRACE_IRQ_DEF_
++
++struct irqaction;
++struct softirq_action;
++
++#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
++#define show_softirq_name(val) \
++ __print_symbolic(val, \
++ softirq_name(HI), \
++ softirq_name(TIMER), \
++ softirq_name(NET_TX), \
++ softirq_name(NET_RX), \
++ softirq_name(BLOCK), \
++ softirq_name(BLOCK_IOPOLL), \
++ softirq_name(TASKLET), \
++ softirq_name(SCHED), \
++ softirq_name(HRTIMER), \
++ softirq_name(RCU))
++
++#endif /* _TRACE_IRQ_DEF_ */
++
++/**
++ * irq_handler_entry - called immediately before the irq action handler
++ * @irq: irq number
++ * @action: pointer to struct irqaction
++ *
++ * The struct irqaction pointed to by @action contains various
++ * information about the handler, including the device name,
++ * @action->name, and the device id, @action->dev_id. When used in
++ * conjunction with the irq_handler_exit tracepoint, we can figure
++ * out irq handler latencies.
++ */
++TRACE_EVENT(irq_handler_entry,
++
++ TP_PROTO(int irq, struct irqaction *action),
++
++ TP_ARGS(irq, action),
++
++ TP_STRUCT__entry(
++ __field( int, irq )
++ __string( name, action->name )
++ ),
++
++ TP_fast_assign(
++ tp_assign(irq, irq)
++ tp_strcpy(name, action->name)
++ ),
++
++ TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
++)
++
++/**
++ * irq_handler_exit - called immediately after the irq action handler returns
++ * @irq: irq number
++ * @action: pointer to struct irqaction
++ * @ret: return value
++ *
++ * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
++ * @action->handler successfully handled this irq. Otherwise, the irq might be
++ * a shared irq line, or the irq was not handled successfully. Can be used in
++ * conjunction with the irq_handler_entry to understand irq handler latencies.
++ */
++TRACE_EVENT(irq_handler_exit,
++
++ TP_PROTO(int irq, struct irqaction *action, int ret),
++
++ TP_ARGS(irq, action, ret),
++
++ TP_STRUCT__entry(
++ __field( int, irq )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(irq, irq)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("irq=%d ret=%s",
++ __entry->irq, __entry->ret ? "handled" : "unhandled")
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++DECLARE_EVENT_CLASS(softirq,
++
++ TP_PROTO(unsigned int vec_nr),
++
++ TP_ARGS(vec_nr),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, vec )
++ ),
++
++ TP_fast_assign(
++ tp_assign(vec, vec_nr)
++ ),
++
++ TP_printk("vec=%u [action=%s]", __entry->vec,
++ show_softirq_name(__entry->vec))
++)
++
++/**
++ * softirq_entry - called immediately before the softirq handler
++ * @vec_nr: softirq vector number
++ *
++ * When used in combination with the softirq_exit tracepoint
++ * we can determine the softirq handler runtime.
++ */
++DEFINE_EVENT(softirq, softirq_entry,
++
++ TP_PROTO(unsigned int vec_nr),
++
++ TP_ARGS(vec_nr)
++)
++
++/**
++ * softirq_exit - called immediately after the softirq handler returns
++ * @vec_nr: softirq vector number
++ *
++ * When used in combination with the softirq_entry tracepoint
++ * we can determine the softirq handler runtime.
++ */
++DEFINE_EVENT(softirq, softirq_exit,
++
++ TP_PROTO(unsigned int vec_nr),
++
++ TP_ARGS(vec_nr)
++)
++
++/**
++ * softirq_raise - called immediately when a softirq is raised
++ * @vec_nr: softirq vector number
++ *
++ * When used in combination with the softirq_entry tracepoint
++ * we can determine the softirq raise to run latency.
++ */
++DEFINE_EVENT(softirq, softirq_raise,
++
++ TP_PROTO(unsigned int vec_nr),
++
++ TP_ARGS(vec_nr)
++)
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) */
++DECLARE_EVENT_CLASS(softirq,
++
++ TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
++
++ TP_ARGS(h, vec),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, vec )
++ ),
++
++ TP_fast_assign(
++ tp_assign(vec, (int)(h - vec))
++ ),
++
++ TP_printk("vec=%u [action=%s]", __entry->vec,
++ show_softirq_name(__entry->vec))
++)
++
++/**
++ * softirq_entry - called immediately before the softirq handler
++ * @h: pointer to struct softirq_action
++ * @vec: pointer to first struct softirq_action in softirq_vec array
++ *
++ * When used in combination with the softirq_exit tracepoint
++ * we can determine the softirq handler runtime.
++ */
++DEFINE_EVENT(softirq, softirq_entry,
++
++ TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
++
++ TP_ARGS(h, vec)
++)
++
++/**
++ * softirq_exit - called immediately after the softirq handler returns
++ * @h: pointer to struct softirq_action
++ * @vec: pointer to first struct softirq_action in softirq_vec array
++ *
++ * When used in combination with the softirq_entry tracepoint
++ * we can determine the softirq handler runtime.
++ */
++DEFINE_EVENT(softirq, softirq_exit,
++
++ TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
++
++ TP_ARGS(h, vec)
++)
++
++/**
++ * softirq_raise - called immediately when a softirq is raised
++ * @h: pointer to struct softirq_action
++ * @vec: pointer to first struct softirq_action in softirq_vec array
++ *
++ * When used in combination with the softirq_entry tracepoint
++ * we can determine the softirq raise to run latency.
++ */
++DEFINE_EVENT(softirq, softirq_raise,
++
++ TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
++
++ TP_ARGS(h, vec)
++)
++#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) */
++
++#endif /* _TRACE_IRQ_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/jbd.h
+@@ -0,0 +1,268 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM jbd
++
++#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_JBD_H
++
++#include <linux/jbd.h>
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++TRACE_EVENT(jbd_checkpoint,
++
++ TP_PROTO(journal_t *journal, int result),
++
++ TP_ARGS(journal, result),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, result )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++ tp_assign(result, result)
++ ),
++
++ TP_printk("dev %d,%d result %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->result)
++)
++
++DECLARE_EVENT_CLASS(jbd_commit,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ __field( char, sync_commit )
++#endif
++ __field( int, transaction )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ tp_assign(sync_commit, commit_transaction->t_synchronous_commit)
++#endif
++ tp_assign(transaction, commit_transaction->t_tid)
++ ),
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ TP_printk("dev %d,%d transaction %d sync %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction, __entry->sync_commit)
++#else
++ TP_printk("dev %d,%d transaction %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction)
++#endif
++)
++
++DEFINE_EVENT(jbd_commit, jbd_start_commit,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++)
++
++DEFINE_EVENT(jbd_commit, jbd_commit_locking,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++)
++
++DEFINE_EVENT(jbd_commit, jbd_commit_flushing,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++)
++
++DEFINE_EVENT(jbd_commit, jbd_commit_logging,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++)
++
++TRACE_EVENT(jbd_drop_transaction,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ __field( char, sync_commit )
++#endif
++ __field( int, transaction )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ tp_assign(sync_commit, commit_transaction->t_synchronous_commit)
++#endif
++ tp_assign(transaction, commit_transaction->t_tid)
++ ),
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ TP_printk("dev %d,%d transaction %d sync %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction, __entry->sync_commit)
++#else
++ TP_printk("dev %d,%d transaction %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction)
++#endif
++)
++
++TRACE_EVENT(jbd_end_commit,
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ __field( char, sync_commit )
++#endif
++ __field( int, transaction )
++ __field( int, head )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ tp_assign(sync_commit, commit_transaction->t_synchronous_commit)
++#endif
++ tp_assign(transaction, commit_transaction->t_tid)
++ tp_assign(head, journal->j_tail_sequence)
++ ),
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ TP_printk("dev %d,%d transaction %d sync %d head %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction, __entry->sync_commit, __entry->head)
++#else
++ TP_printk("dev %d,%d transaction %d head %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction, __entry->head)
++#endif
++)
++
++TRACE_EVENT(jbd_do_submit_data,
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ __field( char, sync_commit )
++#endif
++ __field( int, transaction )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ tp_assign(sync_commit, commit_transaction->t_synchronous_commit)
++#endif
++ tp_assign(transaction, commit_transaction->t_tid)
++ ),
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ TP_printk("dev %d,%d transaction %d sync %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction, __entry->sync_commit)
++#else
++ TP_printk("dev %d,%d transaction %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction)
++#endif
++)
++
++TRACE_EVENT(jbd_cleanup_journal_tail,
++
++ TP_PROTO(journal_t *journal, tid_t first_tid,
++ unsigned long block_nr, unsigned long freed),
++
++ TP_ARGS(journal, first_tid, block_nr, freed),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( tid_t, tail_sequence )
++ __field( tid_t, first_tid )
++ __field(unsigned long, block_nr )
++ __field(unsigned long, freed )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++ tp_assign(tail_sequence, journal->j_tail_sequence)
++ tp_assign(first_tid, first_tid)
++ tp_assign(block_nr, block_nr)
++ tp_assign(freed, freed)
++ ),
++
++ TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->tail_sequence, __entry->first_tid,
++ __entry->block_nr, __entry->freed)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
++TRACE_EVENT_MAP(journal_write_superblock,
++
++ jbd_journal_write_superblock,
++
++ TP_PROTO(journal_t *journal, int write_op),
++
++ TP_ARGS(journal, write_op),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, write_op )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++ tp_assign(write_op, write_op)
++ ),
++
++ TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
++ MINOR(__entry->dev), __entry->write_op)
++)
++#else
++TRACE_EVENT(jbd_update_superblock_end,
++ TP_PROTO(journal_t *journal, int wait),
++
++ TP_ARGS(journal, wait),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, wait )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++ tp_assign(wait, wait)
++ ),
++
++ TP_printk("dev %d,%d wait %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->wait)
++)
++#endif
++
++#endif /* _TRACE_JBD_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/jbd2.h
+@@ -0,0 +1,280 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM jbd2
++
++#if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_JBD2_H
++
++#include <linux/jbd2.h>
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
++#ifndef _TRACE_JBD2_DEF
++#define _TRACE_JBD2_DEF
++struct transaction_chp_stats_s;
++struct transaction_run_stats_s;
++#endif
++#endif
++
++TRACE_EVENT(jbd2_checkpoint,
++
++ TP_PROTO(journal_t *journal, int result),
++
++ TP_ARGS(journal, result),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, result )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++ tp_assign(result, result)
++ ),
++
++ TP_printk("dev %d,%d result %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->result)
++)
++
++DECLARE_EVENT_CLASS(jbd2_commit,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( char, sync_commit )
++ __field( int, transaction )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++ tp_assign(sync_commit, commit_transaction->t_synchronous_commit)
++ tp_assign(transaction, commit_transaction->t_tid)
++ ),
++
++ TP_printk("dev %d,%d transaction %d sync %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction, __entry->sync_commit)
++)
++
++DEFINE_EVENT(jbd2_commit, jbd2_start_commit,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++)
++
++DEFINE_EVENT(jbd2_commit, jbd2_commit_locking,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++)
++
++DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++)
++
++DEFINE_EVENT(jbd2_commit, jbd2_commit_logging,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++DEFINE_EVENT(jbd2_commit, jbd2_drop_transaction,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++)
++#endif
++
++TRACE_EVENT(jbd2_end_commit,
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( char, sync_commit )
++ __field( int, transaction )
++ __field( int, head )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++ tp_assign(sync_commit, commit_transaction->t_synchronous_commit)
++ tp_assign(transaction, commit_transaction->t_tid)
++ tp_assign(head, journal->j_tail_sequence)
++ ),
++
++ TP_printk("dev %d,%d transaction %d sync %d head %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction, __entry->sync_commit, __entry->head)
++)
++
++TRACE_EVENT(jbd2_submit_inode_data,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, inode->i_sb->s_dev)
++ tp_assign(ino, inode->i_ino)
++ ),
++
++ TP_printk("dev %d,%d ino %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
++TRACE_EVENT(jbd2_run_stats,
++ TP_PROTO(dev_t dev, unsigned long tid,
++ struct transaction_run_stats_s *stats),
++
++ TP_ARGS(dev, tid, stats),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( unsigned long, tid )
++ __field( unsigned long, wait )
++ __field( unsigned long, running )
++ __field( unsigned long, locked )
++ __field( unsigned long, flushing )
++ __field( unsigned long, logging )
++ __field( __u32, handle_count )
++ __field( __u32, blocks )
++ __field( __u32, blocks_logged )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, dev)
++ tp_assign(tid, tid)
++ tp_assign(wait, stats->rs_wait)
++ tp_assign(running, stats->rs_running)
++ tp_assign(locked, stats->rs_locked)
++ tp_assign(flushing, stats->rs_flushing)
++ tp_assign(logging, stats->rs_logging)
++ tp_assign(handle_count, stats->rs_handle_count)
++ tp_assign(blocks, stats->rs_blocks)
++ tp_assign(blocks_logged, stats->rs_blocks_logged)
++ ),
++
++ TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u "
++ "logging %u handle_count %u blocks %u blocks_logged %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
++ jiffies_to_msecs(__entry->wait),
++ jiffies_to_msecs(__entry->running),
++ jiffies_to_msecs(__entry->locked),
++ jiffies_to_msecs(__entry->flushing),
++ jiffies_to_msecs(__entry->logging),
++ __entry->handle_count, __entry->blocks,
++ __entry->blocks_logged)
++)
++
++TRACE_EVENT(jbd2_checkpoint_stats,
++ TP_PROTO(dev_t dev, unsigned long tid,
++ struct transaction_chp_stats_s *stats),
++
++ TP_ARGS(dev, tid, stats),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( unsigned long, tid )
++ __field( unsigned long, chp_time )
++ __field( __u32, forced_to_close )
++ __field( __u32, written )
++ __field( __u32, dropped )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, dev)
++ tp_assign(tid, tid)
++ tp_assign(chp_time, stats->cs_chp_time)
++ tp_assign(forced_to_close, stats->cs_forced_to_close)
++ tp_assign(written, stats->cs_written)
++ tp_assign(dropped, stats->cs_dropped)
++ ),
++
++ TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
++ "written %u dropped %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
++ jiffies_to_msecs(__entry->chp_time),
++ __entry->forced_to_close, __entry->written, __entry->dropped)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++TRACE_EVENT(jbd2_update_log_tail,
++#else
++TRACE_EVENT(jbd2_cleanup_journal_tail,
++#endif
++
++ TP_PROTO(journal_t *journal, tid_t first_tid,
++ unsigned long block_nr, unsigned long freed),
++
++ TP_ARGS(journal, first_tid, block_nr, freed),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( tid_t, tail_sequence )
++ __field( tid_t, first_tid )
++ __field(unsigned long, block_nr )
++ __field(unsigned long, freed )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++ tp_assign(tail_sequence, journal->j_tail_sequence)
++ tp_assign(first_tid, first_tid)
++ tp_assign(block_nr, block_nr)
++ tp_assign(freed, freed)
++ ),
++
++ TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->tail_sequence, __entry->first_tid,
++ __entry->block_nr, __entry->freed)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++TRACE_EVENT(jbd2_write_superblock,
++
++ TP_PROTO(journal_t *journal, int write_op),
++
++ TP_ARGS(journal, write_op),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, write_op )
++ ),
++
++ TP_fast_assign(
++ tp_assign(dev, journal->j_fs_dev->bd_dev)
++ tp_assign(write_op, write_op)
++ ),
++
++ TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
++ MINOR(__entry->dev), __entry->write_op)
++)
++#endif
++
++#endif /* _TRACE_JBD2_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/kmem.h
+@@ -0,0 +1,380 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM kmem
++
++#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_KMEM_H
++
++#include <linux/types.h>
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++#include <trace/events/gfpflags.h>
++#endif
++
++DECLARE_EVENT_CLASS(kmem_alloc,
++
++ TP_PROTO(unsigned long call_site,
++ const void *ptr,
++ size_t bytes_req,
++ size_t bytes_alloc,
++ gfp_t gfp_flags),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
++
++ TP_STRUCT__entry(
++ __field_hex( unsigned long, call_site )
++ __field_hex( const void *, ptr )
++ __field( size_t, bytes_req )
++ __field( size_t, bytes_alloc )
++ __field( gfp_t, gfp_flags )
++ ),
++
++ TP_fast_assign(
++ tp_assign(call_site, call_site)
++ tp_assign(ptr, ptr)
++ tp_assign(bytes_req, bytes_req)
++ tp_assign(bytes_alloc, bytes_alloc)
++ tp_assign(gfp_flags, gfp_flags)
++ ),
++
++ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
++ __entry->call_site,
++ __entry->ptr,
++ __entry->bytes_req,
++ __entry->bytes_alloc,
++ show_gfp_flags(__entry->gfp_flags))
++)
++
++DEFINE_EVENT_MAP(kmem_alloc, kmalloc,
++
++ kmem_kmalloc,
++
++ TP_PROTO(unsigned long call_site, const void *ptr,
++ size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
++)
++
++DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
++
++ TP_PROTO(unsigned long call_site, const void *ptr,
++ size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
++)
++
++DECLARE_EVENT_CLASS(kmem_alloc_node,
++
++ TP_PROTO(unsigned long call_site,
++ const void *ptr,
++ size_t bytes_req,
++ size_t bytes_alloc,
++ gfp_t gfp_flags,
++ int node),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
++
++ TP_STRUCT__entry(
++ __field_hex( unsigned long, call_site )
++ __field_hex( const void *, ptr )
++ __field( size_t, bytes_req )
++ __field( size_t, bytes_alloc )
++ __field( gfp_t, gfp_flags )
++ __field( int, node )
++ ),
++
++ TP_fast_assign(
++ tp_assign(call_site, call_site)
++ tp_assign(ptr, ptr)
++ tp_assign(bytes_req, bytes_req)
++ tp_assign(bytes_alloc, bytes_alloc)
++ tp_assign(gfp_flags, gfp_flags)
++ tp_assign(node, node)
++ ),
++
++ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
++ __entry->call_site,
++ __entry->ptr,
++ __entry->bytes_req,
++ __entry->bytes_alloc,
++ show_gfp_flags(__entry->gfp_flags),
++ __entry->node)
++)
++
++DEFINE_EVENT_MAP(kmem_alloc_node, kmalloc_node,
++
++ kmem_kmalloc_node,
++
++ TP_PROTO(unsigned long call_site, const void *ptr,
++ size_t bytes_req, size_t bytes_alloc,
++ gfp_t gfp_flags, int node),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
++)
++
++DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
++
++ TP_PROTO(unsigned long call_site, const void *ptr,
++ size_t bytes_req, size_t bytes_alloc,
++ gfp_t gfp_flags, int node),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
++)
++
++DECLARE_EVENT_CLASS(kmem_free,
++
++ TP_PROTO(unsigned long call_site, const void *ptr),
++
++ TP_ARGS(call_site, ptr),
++
++ TP_STRUCT__entry(
++ __field_hex( unsigned long, call_site )
++ __field_hex( const void *, ptr )
++ ),
++
++ TP_fast_assign(
++ tp_assign(call_site, call_site)
++ tp_assign(ptr, ptr)
++ ),
++
++ TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
++)
++
++DEFINE_EVENT_MAP(kmem_free, kfree,
++
++ kmem_kfree,
++
++ TP_PROTO(unsigned long call_site, const void *ptr),
++
++ TP_ARGS(call_site, ptr)
++)
++
++DEFINE_EVENT(kmem_free, kmem_cache_free,
++
++ TP_PROTO(unsigned long call_site, const void *ptr),
++
++ TP_ARGS(call_site, ptr)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++TRACE_EVENT(mm_page_free,
++#else
++TRACE_EVENT(mm_page_free_direct,
++#endif
++
++ TP_PROTO(struct page *page, unsigned int order),
++
++ TP_ARGS(page, order),
++
++ TP_STRUCT__entry(
++ __field_hex( struct page *, page )
++ __field( unsigned int, order )
++ ),
++
++ TP_fast_assign(
++ tp_assign(page, page)
++ tp_assign(order, order)
++ ),
++
++ TP_printk("page=%p pfn=%lu order=%d",
++ __entry->page,
++ page_to_pfn(__entry->page),
++ __entry->order)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++TRACE_EVENT(mm_page_free_batched,
++#else
++TRACE_EVENT(mm_pagevec_free,
++#endif
++
++ TP_PROTO(struct page *page, int cold),
++
++ TP_ARGS(page, cold),
++
++ TP_STRUCT__entry(
++ __field_hex( struct page *, page )
++ __field( int, cold )
++ ),
++
++ TP_fast_assign(
++ tp_assign(page, page)
++ tp_assign(cold, cold)
++ ),
++
++ TP_printk("page=%p pfn=%lu order=0 cold=%d",
++ __entry->page,
++ page_to_pfn(__entry->page),
++ __entry->cold)
++)
++
++TRACE_EVENT(mm_page_alloc,
++
++ TP_PROTO(struct page *page, unsigned int order,
++ gfp_t gfp_flags, int migratetype),
++
++ TP_ARGS(page, order, gfp_flags, migratetype),
++
++ TP_STRUCT__entry(
++ __field_hex( struct page *, page )
++ __field( unsigned int, order )
++ __field( gfp_t, gfp_flags )
++ __field( int, migratetype )
++ ),
++
++ TP_fast_assign(
++ tp_assign(page, page)
++ tp_assign(order, order)
++ tp_assign(gfp_flags, gfp_flags)
++ tp_assign(migratetype, migratetype)
++ ),
++
++ TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
++ __entry->page,
++ __entry->page ? page_to_pfn(__entry->page) : 0,
++ __entry->order,
++ __entry->migratetype,
++ show_gfp_flags(__entry->gfp_flags))
++)
++
++DECLARE_EVENT_CLASS(mm_page,
++
++ TP_PROTO(struct page *page, unsigned int order, int migratetype),
++
++ TP_ARGS(page, order, migratetype),
++
++ TP_STRUCT__entry(
++ __field_hex( struct page *, page )
++ __field( unsigned int, order )
++ __field( int, migratetype )
++ ),
++
++ TP_fast_assign(
++ tp_assign(page, page)
++ tp_assign(order, order)
++ tp_assign(migratetype, migratetype)
++ ),
++
++ TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
++ __entry->page,
++ __entry->page ? page_to_pfn(__entry->page) : 0,
++ __entry->order,
++ __entry->migratetype,
++ __entry->order == 0)
++)
++
++DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
++
++ TP_PROTO(struct page *page, unsigned int order, int migratetype),
++
++ TP_ARGS(page, order, migratetype)
++)
++
++DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++ TP_PROTO(struct page *page, unsigned int order, int migratetype),
++#else
++ TP_PROTO(struct page *page, int order, int migratetype),
++#endif
++
++ TP_ARGS(page, order, migratetype),
++
++ TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
++ __entry->page, page_to_pfn(__entry->page),
++ __entry->order, __entry->migratetype)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
++
++TRACE_EVENT(mm_page_alloc_extfrag,
++
++ TP_PROTO(struct page *page,
++ int alloc_order, int fallback_order,
++ int alloc_migratetype, int fallback_migratetype,
++ int change_ownership),
++
++ TP_ARGS(page,
++ alloc_order, fallback_order,
++ alloc_migratetype, fallback_migratetype,
++ change_ownership),
++
++ TP_STRUCT__entry(
++ __field_hex( struct page *, page )
++ __field( int, alloc_order )
++ __field( int, fallback_order )
++ __field( int, alloc_migratetype )
++ __field( int, fallback_migratetype )
++ __field( int, change_ownership )
++ ),
++
++ TP_fast_assign(
++ tp_assign(page, page)
++ tp_assign(alloc_order, alloc_order)
++ tp_assign(fallback_order, fallback_order)
++ tp_assign(alloc_migratetype, alloc_migratetype)
++ tp_assign(fallback_migratetype, fallback_migratetype)
++ tp_assign(change_ownership, change_ownership)
++ ),
++
++ TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
++ __entry->page,
++ page_to_pfn(__entry->page),
++ __entry->alloc_order,
++ __entry->fallback_order,
++ pageblock_order,
++ __entry->alloc_migratetype,
++ __entry->fallback_migratetype,
++ __entry->fallback_order < pageblock_order,
++ __entry->change_ownership)
++)
++
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
++
++TRACE_EVENT(mm_page_alloc_extfrag,
++
++ TP_PROTO(struct page *page,
++ int alloc_order, int fallback_order,
++ int alloc_migratetype, int fallback_migratetype),
++
++ TP_ARGS(page,
++ alloc_order, fallback_order,
++ alloc_migratetype, fallback_migratetype),
++
++ TP_STRUCT__entry(
++ __field_hex( struct page *, page )
++ __field( int, alloc_order )
++ __field( int, fallback_order )
++ __field( int, alloc_migratetype )
++ __field( int, fallback_migratetype )
++ ),
++
++ TP_fast_assign(
++ tp_assign(page, page)
++ tp_assign(alloc_order, alloc_order)
++ tp_assign(fallback_order, fallback_order)
++ tp_assign(alloc_migratetype, alloc_migratetype)
++ tp_assign(fallback_migratetype, fallback_migratetype)
++ ),
++
++ TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
++ __entry->page,
++ page_to_pfn(__entry->page),
++ __entry->alloc_order,
++ __entry->fallback_order,
++ pageblock_order,
++ __entry->alloc_migratetype,
++ __entry->fallback_migratetype,
++ __entry->fallback_order < pageblock_order,
++ __entry->alloc_migratetype == __entry->fallback_migratetype)
++)
++
++#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
++
++#endif
++
++#endif /* _TRACE_KMEM_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/kvm.h
+@@ -0,0 +1,356 @@
++#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_KVM_MAIN_H
++
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM kvm
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
++
++#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++
++#define kvm_trace_exit_reason \
++ ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
++ ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
++ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
++ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
++ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \
++ ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH)
++
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
++
++#define kvm_trace_exit_reason \
++ ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
++ ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
++ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
++ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
++ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \
++ ERSN(S390_UCONTROL)
++
++#else
++
++#define kvm_trace_exit_reason \
++ ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
++ ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
++ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
++ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
++ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
++
++#endif
++
++TRACE_EVENT(kvm_userspace_exit,
++ TP_PROTO(__u32 reason, int errno),
++ TP_ARGS(reason, errno),
++
++ TP_STRUCT__entry(
++ __field( __u32, reason )
++ __field( int, errno )
++ ),
++
++ TP_fast_assign(
++ tp_assign(reason, reason)
++ tp_assign(errno, errno)
++ ),
++
++ TP_printk("reason %s (%d)",
++ __entry->errno < 0 ?
++ (__entry->errno == -EINTR ? "restart" : "error") :
++ __print_symbolic(__entry->reason, kvm_trace_exit_reason),
++ __entry->errno < 0 ? -__entry->errno : __entry->reason)
++)
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0))
++#if defined(__KVM_HAVE_IOAPIC)
++#undef __KVM_HAVE_IRQ_LINE
++#define __KVM_HAVE_IRQ_LINE
++#endif
++#endif
++
++#if defined(__KVM_HAVE_IRQ_LINE)
++TRACE_EVENT(kvm_set_irq,
++ TP_PROTO(unsigned int gsi, int level, int irq_source_id),
++ TP_ARGS(gsi, level, irq_source_id),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, gsi )
++ __field( int, level )
++ __field( int, irq_source_id )
++ ),
++
++ TP_fast_assign(
++ tp_assign(gsi, gsi)
++ tp_assign(level, level)
++ tp_assign(irq_source_id, irq_source_id)
++ ),
++
++ TP_printk("gsi %u level %d source %d",
++ __entry->gsi, __entry->level, __entry->irq_source_id)
++)
++#endif
++
++#if defined(__KVM_HAVE_IOAPIC)
++#define kvm_deliver_mode \
++ {0x0, "Fixed"}, \
++ {0x1, "LowPrio"}, \
++ {0x2, "SMI"}, \
++ {0x3, "Res3"}, \
++ {0x4, "NMI"}, \
++ {0x5, "INIT"}, \
++ {0x6, "SIPI"}, \
++ {0x7, "ExtINT"}
++
++TRACE_EVENT(kvm_ioapic_set_irq,
++ TP_PROTO(__u64 e, int pin, bool coalesced),
++ TP_ARGS(e, pin, coalesced),
++
++ TP_STRUCT__entry(
++ __field( __u64, e )
++ __field( int, pin )
++ __field( bool, coalesced )
++ ),
++
++ TP_fast_assign(
++ tp_assign(e, e)
++ tp_assign(pin, pin)
++ tp_assign(coalesced, coalesced)
++ ),
++
++ TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
++ __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
++ __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
++ (__entry->e & (1<<11)) ? "logical" : "physical",
++ (__entry->e & (1<<15)) ? "level" : "edge",
++ (__entry->e & (1<<16)) ? "|masked" : "",
++ __entry->coalesced ? " (coalesced)" : "")
++)
++
++TRACE_EVENT(kvm_msi_set_irq,
++ TP_PROTO(__u64 address, __u64 data),
++ TP_ARGS(address, data),
++
++ TP_STRUCT__entry(
++ __field( __u64, address )
++ __field( __u64, data )
++ ),
++
++ TP_fast_assign(
++ tp_assign(address, address)
++ tp_assign(data, data)
++ ),
++
++ TP_printk("dst %u vec %x (%s|%s|%s%s)",
++ (u8)(__entry->address >> 12), (u8)__entry->data,
++ __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
++ (__entry->address & (1<<2)) ? "logical" : "physical",
++ (__entry->data & (1<<15)) ? "level" : "edge",
++ (__entry->address & (1<<3)) ? "|rh" : "")
++)
++
++#define kvm_irqchips \
++ {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
++ {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
++ {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
++
++TRACE_EVENT(kvm_ack_irq,
++ TP_PROTO(unsigned int irqchip, unsigned int pin),
++ TP_ARGS(irqchip, pin),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, irqchip )
++ __field( unsigned int, pin )
++ ),
++
++ TP_fast_assign(
++ tp_assign(irqchip, irqchip)
++ tp_assign(pin, pin)
++ ),
++
++ TP_printk("irqchip %s pin %u",
++ __print_symbolic(__entry->irqchip, kvm_irqchips),
++ __entry->pin)
++)
++
++
++
++#endif /* defined(__KVM_HAVE_IOAPIC) */
++
++#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
++#define KVM_TRACE_MMIO_READ 1
++#define KVM_TRACE_MMIO_WRITE 2
++
++#define kvm_trace_symbol_mmio \
++ { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
++ { KVM_TRACE_MMIO_READ, "read" }, \
++ { KVM_TRACE_MMIO_WRITE, "write" }
++
++TRACE_EVENT(kvm_mmio,
++ TP_PROTO(int type, int len, u64 gpa, u64 val),
++ TP_ARGS(type, len, gpa, val),
++
++ TP_STRUCT__entry(
++ __field( u32, type )
++ __field( u32, len )
++ __field( u64, gpa )
++ __field( u64, val )
++ ),
++
++ TP_fast_assign(
++ tp_assign(type, type)
++ tp_assign(len, len)
++ tp_assign(gpa, gpa)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
++ __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
++ __entry->len, __entry->gpa, __entry->val)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++
++#define kvm_fpu_load_symbol \
++ {0, "unload"}, \
++ {1, "load"}
++
++TRACE_EVENT(kvm_fpu,
++ TP_PROTO(int load),
++ TP_ARGS(load),
++
++ TP_STRUCT__entry(
++ __field( u32, load )
++ ),
++
++ TP_fast_assign(
++ tp_assign(load, load)
++ ),
++
++ TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
++)
++
++TRACE_EVENT(kvm_age_page,
++ TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
++ TP_ARGS(hva, slot, ref),
++
++ TP_STRUCT__entry(
++ __field( u64, hva )
++ __field( u64, gfn )
++ __field( u8, referenced )
++ ),
++
++ TP_fast_assign(
++ tp_assign(hva, hva)
++ tp_assign(gfn,
++ slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT))
++ tp_assign(referenced, ref)
++ ),
++
++ TP_printk("hva %llx gfn %llx %s",
++ __entry->hva, __entry->gfn,
++ __entry->referenced ? "YOUNG" : "OLD")
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
++
++#ifdef CONFIG_KVM_ASYNC_PF
++DECLARE_EVENT_CLASS(kvm_async_get_page_class,
++
++ TP_PROTO(u64 gva, u64 gfn),
++
++ TP_ARGS(gva, gfn),
++
++ TP_STRUCT__entry(
++ __field(__u64, gva)
++ __field(u64, gfn)
++ ),
++
++ TP_fast_assign(
++ tp_assign(gva, gva)
++ tp_assign(gfn, gfn)
++ ),
++
++ TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
++)
++
++DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
++
++ TP_PROTO(u64 gva, u64 gfn),
++
++ TP_ARGS(gva, gfn)
++)
++
++DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
++
++ TP_PROTO(u64 gva, u64 gfn),
++
++ TP_ARGS(gva, gfn)
++)
++
++DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
++
++ TP_PROTO(u64 token, u64 gva),
++
++ TP_ARGS(token, gva),
++
++ TP_STRUCT__entry(
++ __field(__u64, token)
++ __field(__u64, gva)
++ ),
++
++ TP_fast_assign(
++ tp_assign(token, token)
++ tp_assign(gva, gva)
++ ),
++
++ TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
++
++)
++
++DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
++
++ TP_PROTO(u64 token, u64 gva),
++
++ TP_ARGS(token, gva)
++)
++
++DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
++
++ TP_PROTO(u64 token, u64 gva),
++
++ TP_ARGS(token, gva)
++)
++
++TRACE_EVENT(
++ kvm_async_pf_completed,
++ TP_PROTO(unsigned long address, struct page *page, u64 gva),
++ TP_ARGS(address, page, gva),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, address)
++ __field(pfn_t, pfn)
++ __field(u64, gva)
++ ),
++
++ TP_fast_assign(
++ tp_assign(address, address)
++ tp_assign(pfn, page ? page_to_pfn(page) : 0)
++ tp_assign(gva, gva)
++ ),
++
++ TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
++ __entry->address, __entry->pfn)
++)
++
++#endif
++
++#endif
++
++#endif /* _TRACE_KVM_MAIN_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/lock.h
+@@ -0,0 +1,207 @@
++#include <linux/version.h>
++
++#undef TRACE_SYSTEM
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++#define TRACE_SYSTEM lock
++#else
++#define TRACE_SYSTEM lockdep
++#define TRACE_INCLUDE_FILE lock
++#if defined(_TRACE_LOCKDEP_H)
++#define _TRACE_LOCK_H
++#endif
++#endif
++
++#if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_LOCK_H
++
++#include <linux/lockdep.h>
++#include <linux/tracepoint.h>
++
++#ifdef CONFIG_LOCKDEP
++
++TRACE_EVENT(lock_acquire,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
++ int trylock, int read, int check,
++ struct lockdep_map *next_lock, unsigned long ip),
++
++ TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
++
++ TP_STRUCT__entry(
++ __field(unsigned int, flags)
++ __string(name, lock->name)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ __field(void *, lockdep_addr)
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_assign(flags, (trylock ? 1 : 0) | (read ? 2 : 0))
++ tp_strcpy(name, lock->name)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ tp_assign(lockdep_addr, lock)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ TP_printk("%p %s%s%s", __entry->lockdep_addr,
++#else
++ TP_printk("%s%s%s",
++#endif
++ (__entry->flags & 1) ? "try " : "",
++ (__entry->flags & 2) ? "read " : "",
++ __get_str(name))
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++
++DECLARE_EVENT_CLASS(lock,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
++
++ TP_ARGS(lock, ip),
++
++ TP_STRUCT__entry(
++ __string( name, lock->name )
++ __field( void *, lockdep_addr )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, lock->name)
++ tp_assign(lockdep_addr, lock)
++ ),
++
++ TP_printk("%p %s", __entry->lockdep_addr, __get_str(name))
++)
++
++DEFINE_EVENT(lock, lock_release,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
++
++ TP_ARGS(lock, ip)
++)
++
++#ifdef CONFIG_LOCK_STAT
++
++DEFINE_EVENT(lock, lock_contended,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
++
++ TP_ARGS(lock, ip)
++)
++
++DEFINE_EVENT(lock, lock_acquired,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
++
++ TP_ARGS(lock, ip)
++)
++
++#endif
++
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
++
++TRACE_EVENT(lock_release,
++
++ TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
++
++ TP_ARGS(lock, nested, ip),
++
++ TP_STRUCT__entry(
++ __string( name, lock->name )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ __field( void *, lockdep_addr )
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, lock->name)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ tp_assign(lockdep_addr, lock)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ TP_printk("%p %s", __entry->lockdep_addr, __get_str(name))
++#else
++ TP_printk("%s", __get_str(name))
++#endif
++)
++
++#ifdef CONFIG_LOCK_STAT
++
++TRACE_EVENT(lock_contended,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
++
++ TP_ARGS(lock, ip),
++
++ TP_STRUCT__entry(
++ __string( name, lock->name )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ __field( void *, lockdep_addr )
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, lock->name)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ tp_assign(lockdep_addr, lock)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ TP_printk("%p %s", __entry->lockdep_addr, __get_str(name))
++#else
++ TP_printk("%s", __get_str(name))
++#endif
++)
++
++TRACE_EVENT(lock_acquired,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime),
++
++ TP_ARGS(lock, ip, waittime),
++
++ TP_STRUCT__entry(
++ __string( name, lock->name )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ __field( s64, wait_nsec )
++ __field( void *, lockdep_addr )
++#else
++ __field(unsigned long, wait_usec)
++ __field(unsigned long, wait_nsec_rem)
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, lock->name)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ tp_assign(wait_nsec, waittime)
++ tp_assign(lockdep_addr, lock)
++#else
++ tp_assign(wait_usec, (unsigned long)waittime)
++ tp_assign(wait_nsec_rem, do_div(waittime, NSEC_PER_USEC))
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34))
++ TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
++ __get_str(name), __entry->wait_nsec)
++#else
++ TP_printk("%s (%lu.%03lu us)",
++ __get_str(name),
++ __entry->wait_usec, __entry->wait_nsec_rem)
++#endif
++)
++
++#endif
++
++#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
++
++#endif
++
++#endif /* _TRACE_LOCK_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/lttng-statedump.h
+@@ -0,0 +1,166 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM lttng_statedump
++
++#if !defined(_TRACE_LTTNG_STATEDUMP_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_LTTNG_STATEDUMP_H
++
++#include <linux/tracepoint.h>
++#include <linux/nsproxy.h>
++#include <linux/pid_namespace.h>
++
++TRACE_EVENT(lttng_statedump_start,
++ TP_PROTO(struct lttng_session *session),
++ TP_ARGS(session),
++ TP_STRUCT__entry(
++ ),
++ TP_fast_assign(
++ ),
++ TP_printk("")
++)
++
++TRACE_EVENT(lttng_statedump_end,
++ TP_PROTO(struct lttng_session *session),
++ TP_ARGS(session),
++ TP_STRUCT__entry(
++ ),
++ TP_fast_assign(
++ ),
++ TP_printk("")
++)
++
++TRACE_EVENT(lttng_statedump_process_state,
++ TP_PROTO(struct lttng_session *session,
++ struct task_struct *p,
++ int type, int mode, int submode, int status,
++ struct pid_namespace *pid_ns),
++ TP_ARGS(session, p, type, mode, submode, status, pid_ns),
++ TP_STRUCT__entry(
++ __field(pid_t, tid)
++ __field(pid_t, vtid)
++ __field(pid_t, pid)
++ __field(pid_t, vpid)
++ __field(pid_t, ppid)
++ __field(pid_t, vppid)
++ __array_text(char, name, TASK_COMM_LEN)
++ __field(int, type)
++ __field(int, mode)
++ __field(int, submode)
++ __field(int, status)
++ __field(int, ns_level)
++ ),
++ TP_fast_assign(
++ tp_assign(tid, p->pid)
++ tp_assign(vtid, pid_ns ? task_pid_nr_ns(p, pid_ns) : 0)
++ tp_assign(pid, p->tgid)
++ tp_assign(vpid, pid_ns ? task_tgid_nr_ns(p, pid_ns) : 0)
++ tp_assign(ppid,
++ ({
++ pid_t ret;
++
++ rcu_read_lock();
++ ret = task_tgid_nr(p->real_parent);
++ rcu_read_unlock();
++ ret;
++ }))
++ tp_assign(vppid,
++ ({
++ struct task_struct *parent;
++ pid_t ret = 0;
++
++ if (pid_ns) {
++ rcu_read_lock();
++ parent = rcu_dereference(p->real_parent);
++ ret = task_tgid_nr_ns(parent, pid_ns);
++ rcu_read_unlock();
++ }
++ ret;
++ }))
++ tp_memcpy(name, p->comm, TASK_COMM_LEN)
++ tp_assign(type, type)
++ tp_assign(mode, mode)
++ tp_assign(submode, submode)
++ tp_assign(status, status)
++ tp_assign(ns_level, pid_ns ? pid_ns->level : 0)
++ ),
++ TP_printk("")
++)
++
++TRACE_EVENT(lttng_statedump_file_descriptor,
++ TP_PROTO(struct lttng_session *session,
++ struct task_struct *p, int fd, const char *filename),
++ TP_ARGS(session, p, fd, filename),
++ TP_STRUCT__entry(
++ __field(pid_t, pid)
++ __field(int, fd)
++ __string(filename, filename)
++ ),
++ TP_fast_assign(
++ tp_assign(pid, p->tgid)
++ tp_assign(fd, fd)
++ tp_strcpy(filename, filename)
++ ),
++ TP_printk("")
++)
++
++TRACE_EVENT(lttng_statedump_vm_map,
++ TP_PROTO(struct lttng_session *session,
++ struct task_struct *p, struct vm_area_struct *map,
++ unsigned long inode),
++ TP_ARGS(session, p, map, inode),
++ TP_STRUCT__entry(
++ __field(pid_t, pid)
++ __field_hex(unsigned long, start)
++ __field_hex(unsigned long, end)
++ __field_hex(unsigned long, flags)
++ __field(unsigned long, inode)
++ __field(unsigned long, pgoff)
++ ),
++ TP_fast_assign(
++ tp_assign(pid, p->tgid)
++ tp_assign(start, map->vm_start)
++ tp_assign(end, map->vm_end)
++ tp_assign(flags, map->vm_flags)
++ tp_assign(inode, inode)
++ tp_assign(pgoff, map->vm_pgoff << PAGE_SHIFT)
++ ),
++ TP_printk("")
++)
++
++TRACE_EVENT(lttng_statedump_network_interface,
++ TP_PROTO(struct lttng_session *session,
++ struct net_device *dev, struct in_ifaddr *ifa),
++ TP_ARGS(session, dev, ifa),
++ TP_STRUCT__entry(
++ __string(name, dev->name)
++ __field_network_hex(uint32_t, address_ipv4)
++ ),
++ TP_fast_assign(
++ tp_strcpy(name, dev->name)
++ tp_assign(address_ipv4, ifa ? ifa->ifa_address : 0U)
++ ),
++ TP_printk("")
++)
++
++/* Called with desc->lock held */
++TRACE_EVENT(lttng_statedump_interrupt,
++ TP_PROTO(struct lttng_session *session,
++ unsigned int irq, const char *chip_name,
++ struct irqaction *action),
++ TP_ARGS(session, irq, chip_name, action),
++ TP_STRUCT__entry(
++ __field(unsigned int, irq)
++ __string(name, chip_name)
++ __string(action, action->name ? : "")
++ ),
++ TP_fast_assign(
++ tp_assign(irq, irq)
++ tp_strcpy(name, chip_name)
++ tp_strcpy(action, action->name ? : "")
++ ),
++ TP_printk("")
++)
++
++#endif /* _TRACE_LTTNG_STATEDUMP_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/module.h
+@@ -0,0 +1,157 @@
++/*
++ * Because linux/module.h has tracepoints in the header, and ftrace.h
++ * eventually includes this file, define_trace.h includes linux/module.h.
++ * But we do not want the module.h to override the TRACE_SYSTEM macro
++ * variable that define_trace.h is processing, so we only set it
++ * when module events are being processed, which would happen when
++ * CREATE_TRACE_POINTS is defined.
++ */
++#ifdef CREATE_TRACE_POINTS
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM module
++#endif
++
++#if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_MODULE_H
++
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++#ifdef CONFIG_MODULES
++
++#ifndef _TRACE_MODULE_DEF
++#define _TRACE_MODULE_DEF
++struct module;
++
++#define show_module_flags(flags) __print_flags(flags, "", \
++ { (1UL << TAINT_PROPRIETARY_MODULE), "P" }, \
++ { (1UL << TAINT_FORCED_MODULE), "F" }, \
++ { (1UL << TAINT_CRAP), "C" })
++#endif
++
++TRACE_EVENT(module_load,
++
++ TP_PROTO(struct module *mod),
++
++ TP_ARGS(mod),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, taints )
++ __string( name, mod->name )
++ ),
++
++ TP_fast_assign(
++ tp_assign(taints, mod->taints)
++ tp_strcpy(name, mod->name)
++ ),
++
++ TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints))
++)
++
++TRACE_EVENT(module_free,
++
++ TP_PROTO(struct module *mod),
++
++ TP_ARGS(mod),
++
++ TP_STRUCT__entry(
++ __string( name, mod->name )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, mod->name)
++ ),
++
++ TP_printk("%s", __get_str(name))
++)
++
++#ifdef CONFIG_MODULE_UNLOAD
++/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
++
++DECLARE_EVENT_CLASS(module_refcnt,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ TP_PROTO(struct module *mod, unsigned long ip),
++
++ TP_ARGS(mod, ip),
++#else
++ TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
++
++ TP_ARGS(mod, ip, refcnt),
++#endif
++
++ TP_STRUCT__entry(
++ __field( unsigned long, ip )
++ __field( int, refcnt )
++ __string( name, mod->name )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ip, ip)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ tp_assign(refcnt, __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs))
++#else
++ tp_assign(refcnt, refcnt)
++#endif
++ tp_strcpy(name, mod->name)
++ ),
++
++ TP_printk("%s call_site=%pf refcnt=%d",
++ __get_str(name), (void *)__entry->ip, __entry->refcnt)
++)
++
++DEFINE_EVENT(module_refcnt, module_get,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ TP_PROTO(struct module *mod, unsigned long ip),
++
++ TP_ARGS(mod, ip)
++#else
++ TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
++
++ TP_ARGS(mod, ip, refcnt)
++#endif
++)
++
++DEFINE_EVENT(module_refcnt, module_put,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ TP_PROTO(struct module *mod, unsigned long ip),
++
++ TP_ARGS(mod, ip)
++#else
++ TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
++
++ TP_ARGS(mod, ip, refcnt)
++#endif
++)
++#endif /* CONFIG_MODULE_UNLOAD */
++
++TRACE_EVENT(module_request,
++
++ TP_PROTO(char *name, bool wait, unsigned long ip),
++
++ TP_ARGS(name, wait, ip),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, ip )
++ __field( bool, wait )
++ __string( name, name )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ip, ip)
++ tp_assign(wait, wait)
++ tp_strcpy(name, name)
++ ),
++
++ TP_printk("%s wait=%d call_site=%pf",
++ __get_str(name), (int)__entry->wait, (void *)__entry->ip)
++)
++
++#endif /* CONFIG_MODULES */
++
++#endif /* _TRACE_MODULE_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/napi.h
+@@ -0,0 +1,38 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM napi
++
++#if !defined(_TRACE_NAPI_H_) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_NAPI_H_
++
++#include <linux/netdevice.h>
++#include <linux/tracepoint.h>
++#include <linux/ftrace.h>
++
++#define NO_DEV "(no_device)"
++
++TRACE_EVENT(napi_poll,
++
++ TP_PROTO(struct napi_struct *napi),
++
++ TP_ARGS(napi),
++
++ TP_STRUCT__entry(
++ __field( struct napi_struct *, napi)
++ __string( dev_name, napi->dev ? napi->dev->name : NO_DEV)
++ ),
++
++ TP_fast_assign(
++ tp_assign(napi, napi)
++ tp_strcpy(dev_name, napi->dev ? napi->dev->name : NO_DEV)
++ ),
++
++ TP_printk("napi poll on napi struct %p for device %s",
++ __entry->napi, __get_str(dev_name))
++)
++
++#undef NO_DEV
++
++#endif /* _TRACE_NAPI_H_ */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/net.h
+@@ -0,0 +1,105 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM net
++
++#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_NET_H
++
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/ip.h>
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++TRACE_EVENT(net_dev_xmit,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,40))
++ TP_PROTO(struct sk_buff *skb,
++ int rc,
++ struct net_device *dev,
++ unsigned int skb_len),
++
++ TP_ARGS(skb, rc, dev, skb_len),
++#else
++ TP_PROTO(struct sk_buff *skb,
++ int rc),
++
++ TP_ARGS(skb, rc),
++#endif
++
++ TP_STRUCT__entry(
++ __field( void *, skbaddr )
++ __field( unsigned int, len )
++ __field( int, rc )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,40))
++ __string( name, dev->name )
++#else
++ __string( name, skb->dev->name )
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,40))
++ TP_fast_assign(
++ tp_assign(skbaddr, skb)
++ tp_assign(len, skb_len)
++ tp_assign(rc, rc)
++ tp_strcpy(name, dev->name)
++ ),
++#else
++ TP_fast_assign(
++ tp_assign(skbaddr, skb)
++ tp_assign(len, skb->len)
++ tp_assign(rc, rc)
++ tp_strcpy(name, skb->dev->name)
++ ),
++#endif
++
++ TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
++ __get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
++)
++
++DECLARE_EVENT_CLASS(net_dev_template,
++
++ TP_PROTO(struct sk_buff *skb),
++
++ TP_ARGS(skb),
++
++ TP_STRUCT__entry(
++ __field( void *, skbaddr )
++ __field( unsigned int, len )
++ __string( name, skb->dev->name )
++ ),
++
++ TP_fast_assign(
++ tp_assign(skbaddr, skb)
++ tp_assign(len, skb->len)
++ tp_strcpy(name, skb->dev->name)
++ ),
++
++ TP_printk("dev=%s skbaddr=%p len=%u",
++ __get_str(name), __entry->skbaddr, __entry->len)
++)
++
++DEFINE_EVENT(net_dev_template, net_dev_queue,
++
++ TP_PROTO(struct sk_buff *skb),
++
++ TP_ARGS(skb)
++)
++
++DEFINE_EVENT(net_dev_template, netif_receive_skb,
++
++ TP_PROTO(struct sk_buff *skb),
++
++ TP_ARGS(skb)
++)
++
++DEFINE_EVENT(net_dev_template, netif_rx,
++
++ TP_PROTO(struct sk_buff *skb),
++
++ TP_ARGS(skb)
++)
++#endif /* _TRACE_NET_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/power.h
+@@ -0,0 +1,351 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM power
++
++#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_POWER_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
++DECLARE_EVENT_CLASS(power_cpu,
++
++ TP_PROTO(unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(state, cpu_id),
++
++ TP_STRUCT__entry(
++ __field( u32, state )
++ __field( u32, cpu_id )
++ ),
++
++ TP_fast_assign(
++ tp_assign(state, state)
++ tp_assign(cpu_id, cpu_id)
++ ),
++
++ TP_printk("state=%lu cpu_id=%lu", (unsigned long)__entry->state,
++ (unsigned long)__entry->cpu_id)
++)
++
++DEFINE_EVENT_MAP(power_cpu, cpu_idle,
++
++ power_cpu_idle,
++
++ TP_PROTO(unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(state, cpu_id)
++)
++
++/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
++#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING
++#define _PWR_EVENT_AVOID_DOUBLE_DEFINING
++
++#define PWR_EVENT_EXIT -1
++#endif
++
++DEFINE_EVENT_MAP(power_cpu, cpu_frequency,
++
++ power_cpu_frequency,
++
++ TP_PROTO(unsigned int frequency, unsigned int cpu_id),
++
++ TP_ARGS(frequency, cpu_id)
++)
++
++TRACE_EVENT_MAP(machine_suspend,
++
++ power_machine_suspend,
++
++ TP_PROTO(unsigned int state),
++
++ TP_ARGS(state),
++
++ TP_STRUCT__entry(
++ __field( u32, state )
++ ),
++
++ TP_fast_assign(
++ tp_assign(state, state)
++ ),
++
++ TP_printk("state=%lu", (unsigned long)__entry->state)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
++DECLARE_EVENT_CLASS(power_wakeup_source,
++
++ TP_PROTO(const char *name, unsigned int state),
++
++ TP_ARGS(name, state),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ __field( u64, state )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, name)
++ tp_assign(state, state)
++ ),
++
++ TP_printk("%s state=0x%lx", __get_str(name),
++ (unsigned long)__entry->state)
++)
++
++DEFINE_EVENT_MAP(power_wakeup_source, wakeup_source_activate,
++
++ power_wakeup_source_activate,
++
++ TP_PROTO(const char *name, unsigned int state),
++
++ TP_ARGS(name, state)
++)
++
++DEFINE_EVENT_MAP(power_wakeup_source, wakeup_source_deactivate,
++
++ power_wakeup_source_deactivate,
++
++ TP_PROTO(const char *name, unsigned int state),
++
++ TP_ARGS(name, state)
++)
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
++#undef CONFIG_EVENT_POWER_TRACING_DEPRECATED
++#define CONFIG_EVENT_POWER_TRACING_DEPRECATED
++#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
++#endif
++
++#ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED
++
++/*
++ * The power events are used for cpuidle & suspend (power_start, power_end)
++ * and for cpufreq (power_frequency)
++ */
++DECLARE_EVENT_CLASS(power,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(type, state, cpu_id),
++#else
++ TP_PROTO(unsigned int type, unsigned int state),
++
++ TP_ARGS(type, state),
++#endif
++
++ TP_STRUCT__entry(
++ __field( u64, type )
++ __field( u64, state )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ __field( u64, cpu_id )
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_assign(type, type)
++ tp_assign(state, state)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ tp_assign(cpu_id, cpu_id)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type,
++ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
++#else
++ TP_printk("type=%lu state=%lu", (unsigned long)__entry->type,
++ (unsigned long)__entry->state)
++#endif
++)
++
++DEFINE_EVENT(power, power_start,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(type, state, cpu_id)
++#else
++ TP_PROTO(unsigned int type, unsigned int state),
++
++ TP_ARGS(type, state)
++#endif
++)
++
++DEFINE_EVENT(power, power_frequency,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(type, state, cpu_id)
++#else
++ TP_PROTO(unsigned int type, unsigned int state),
++
++ TP_ARGS(type, state)
++#endif
++)
++
++TRACE_EVENT(power_end,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ TP_PROTO(unsigned int cpu_id),
++
++ TP_ARGS(cpu_id),
++#else
++ TP_PROTO(int dummy),
++
++ TP_ARGS(dummy),
++#endif
++
++ TP_STRUCT__entry(
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ __field( u64, cpu_id )
++#else
++ __field( u64, dummy )
++#endif
++ ),
++
++ TP_fast_assign(
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ tp_assign(cpu_id, cpu_id)
++#else
++ tp_assign(dummy, 0xffff)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++ TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
++#else
++ TP_printk("dummy=%lu", (unsigned long)__entry->dummy)
++#endif
++)
++
++/* Deprecated dummy functions must be protected against multi-declaration */
++#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
++#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
++
++enum {
++ POWER_NONE = 0,
++ POWER_CSTATE = 1,
++ POWER_PSTATE = 2,
++};
++#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
++
++#else /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
++
++#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
++#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
++enum {
++ POWER_NONE = 0,
++ POWER_CSTATE = 1,
++ POWER_PSTATE = 2,
++};
++
++/* These dummy declarations have to be ripped out when the deprecated
++ events get removed */
++static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
++static inline void trace_power_end(u64 cpuid) {};
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
++static inline void trace_power_end_rcuidle(u64 cpuid) {};
++#endif
++static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
++#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
++
++#endif /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++/*
++ * The clock events are used for clock enable/disable and for
++ * clock rate change
++ */
++DECLARE_EVENT_CLASS(power_clock,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ __field( u64, state )
++ __field( u64, cpu_id )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, name)
++ tp_assign(state, state)
++ tp_assign(cpu_id, cpu_id)
++ ),
++
++ TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
++ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
++)
++
++DEFINE_EVENT_MAP(power_clock, clock_enable,
++
++ power_clock_enable,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id)
++)
++
++DEFINE_EVENT_MAP(power_clock, clock_disable,
++
++ power_clock_disable,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id)
++)
++
++DEFINE_EVENT_MAP(power_clock, clock_set_rate,
++
++ power_clock_set_rate,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id)
++)
++
++/*
++ * The power domain events are used for power domains transitions
++ */
++DECLARE_EVENT_CLASS(power_domain,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ __field( u64, state )
++ __field( u64, cpu_id )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, name)
++ tp_assign(state, state)
++ tp_assign(cpu_id, cpu_id)
++),
++
++ TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
++ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
++)
++
++DEFINE_EVENT(power_domain, power_domain_target,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id)
++)
++#endif
++
++#endif /* _TRACE_POWER_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/printk.h
+@@ -0,0 +1,83 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM printk
++
++#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_PRINTK_H
++
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
++
++TRACE_EVENT(console,
++ TP_PROTO(const char *text, size_t len),
++
++ TP_ARGS(text, len),
++
++ TP_STRUCT__entry(
++ __dynamic_array_text(char, msg, len)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy_dyn(msg, text)
++ ),
++
++ TP_printk("%s", __get_str(msg))
++)
++
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
++
++TRACE_EVENT_CONDITION(console,
++ TP_PROTO(const char *log_buf, unsigned start, unsigned end,
++ unsigned log_buf_len),
++
++ TP_ARGS(log_buf, start, end, log_buf_len),
++
++ TP_CONDITION(start != end),
++
++ TP_STRUCT__entry(
++ __dynamic_array_text(char, msg, end - start)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy_dyn(msg, log_buf + start)
++ ),
++
++ TP_printk("%s", __get_str(msg))
++)
++
++#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)) */
++
++TRACE_EVENT_CONDITION(console,
++ TP_PROTO(const char *log_buf, unsigned start, unsigned end,
++ unsigned log_buf_len),
++
++ TP_ARGS(log_buf, start, end, log_buf_len),
++
++ TP_CONDITION(start != end),
++
++ TP_STRUCT__entry(
++ __dynamic_array_text_2(char, msg,
++ (start & (log_buf_len - 1)) > (end & (log_buf_len - 1))
++ ? log_buf_len - (start & (log_buf_len - 1))
++ : end - start,
++ (start & (log_buf_len - 1)) > (end & (log_buf_len - 1))
++ ? end & (log_buf_len - 1)
++ : 0)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy_dyn_2(msg,
++ log_buf + (start & (log_buf_len - 1)),
++ log_buf)
++ ),
++
++ TP_printk("%s", __get_str(msg))
++)
++
++#endif
++
++#endif /* _TRACE_PRINTK_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/random.h
+@@ -0,0 +1,152 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM random
++
++#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_RANDOM_H
++
++#include <linux/writeback.h>
++#include <linux/tracepoint.h>
++
++DECLARE_EVENT_CLASS(random__mix_pool_bytes,
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP),
++
++ TP_STRUCT__entry(
++ __string( pool_name, pool_name )
++ __field( int, bytes )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(pool_name, pool_name)
++ tp_assign(bytes, bytes)
++ tp_assign(IP, IP)
++ ),
++
++ TP_printk("%s pool: bytes %d caller %pF",
++ __get_str(pool_name), __entry->bytes, (void *)__entry->IP)
++)
++
++DEFINE_EVENT_MAP(random__mix_pool_bytes, mix_pool_bytes,
++
++ random_mix_pool_bytes,
++
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP)
++)
++
++DEFINE_EVENT_MAP(random__mix_pool_bytes, mix_pool_bytes_nolock,
++
++ random_mix_pool_bytes_nolock,
++
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP)
++)
++
++TRACE_EVENT_MAP(credit_entropy_bits,
++
++ random_credit_entropy_bits,
++
++ TP_PROTO(const char *pool_name, int bits, int entropy_count,
++ int entropy_total, unsigned long IP),
++
++ TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
++
++ TP_STRUCT__entry(
++ __string( pool_name, pool_name )
++ __field( int, bits )
++ __field( int, entropy_count )
++ __field( int, entropy_total )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(pool_name, pool_name)
++ tp_assign(bits, bits)
++ tp_assign(entropy_count, entropy_count)
++ tp_assign(entropy_total, entropy_total)
++ tp_assign(IP, IP)
++ ),
++
++ TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
++ "caller %pF", __get_str(pool_name), __entry->bits,
++ __entry->entropy_count, __entry->entropy_total,
++ (void *)__entry->IP)
++)
++
++TRACE_EVENT_MAP(get_random_bytes,
++
++ random_get_random_bytes,
++
++ TP_PROTO(int nbytes, unsigned long IP),
++
++ TP_ARGS(nbytes, IP),
++
++ TP_STRUCT__entry(
++ __field( int, nbytes )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ tp_assign(nbytes, nbytes)
++ tp_assign(IP, IP)
++ ),
++
++ TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
++)
++
++DECLARE_EVENT_CLASS(random__extract_entropy,
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP),
++
++ TP_STRUCT__entry(
++ __string( pool_name, pool_name )
++ __field( int, nbytes )
++ __field( int, entropy_count )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(pool_name, pool_name)
++ tp_assign(nbytes, nbytes)
++ tp_assign(entropy_count, entropy_count)
++ tp_assign(IP, IP)
++ ),
++
++ TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
++ __get_str(pool_name), __entry->nbytes, __entry->entropy_count,
++ (void *)__entry->IP)
++)
++
++
++DEFINE_EVENT_MAP(random__extract_entropy, extract_entropy,
++
++ random_extract_entropy,
++
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP)
++)
++
++DEFINE_EVENT_MAP(random__extract_entropy, extract_entropy_user,
++
++ random_extract_entropy_user,
++
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP)
++)
++
++
++
++#endif /* _TRACE_RANDOM_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/rcu.h
+@@ -0,0 +1,763 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM rcu
++
++#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_RCU_H
++
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++/*
++ * Tracepoint for start/end markers used for utilization calculations.
++ * By convention, the string is of the following forms:
++ *
++ * "Start <activity>" -- Mark the start of the specified activity,
++ * such as "context switch". Nesting is permitted.
++ * "End <activity>" -- Mark the end of the specified activity.
++ *
++ * An "@" character within "<activity>" is a comment character: Data
++ * reduction scripts will ignore the "@" and the remainder of the line.
++ */
++TRACE_EVENT(rcu_utilization,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
++ TP_PROTO(const char *s),
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
++ TP_PROTO(char *s),
++#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
++
++ TP_ARGS(s),
++
++ TP_STRUCT__entry(
++ __string(s, s)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(s, s)
++ ),
++
++ TP_printk("%s", __get_str(s))
++)
++
++#ifdef CONFIG_RCU_TRACE
++
++#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
++
++/*
++ * Tracepoint for grace-period events: starting and ending a grace
++ * period ("start" and "end", respectively), a CPU noting the start
++ * of a new grace period or the end of an old grace period ("cpustart"
++ * and "cpuend", respectively), a CPU passing through a quiescent
++ * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
++ * and "cpuofl", respectively), and a CPU being kicked for being too
++ * long in dyntick-idle mode ("kick").
++ */
++TRACE_EVENT(rcu_grace_period,
++
++ TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
++
++ TP_ARGS(rcuname, gpnum, gpevent),
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(unsigned long, gpnum)
++ __string(gpevent, gpevent)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(gpnum, gpnum)
++ tp_strcpy(gpevent, gpevent)
++ ),
++
++ TP_printk("%s %lu %s",
++ __get_str(rcuname), __entry->gpnum, __get_str(gpevent))
++)
++
++/*
++ * Tracepoint for grace-period-initialization events. These are
++ * distinguished by the type of RCU, the new grace-period number, the
++ * rcu_node structure level, the starting and ending CPU covered by the
++ * rcu_node structure, and the mask of CPUs that will be waited for.
++ * All but the type of RCU are extracted from the rcu_node structure.
++ */
++TRACE_EVENT(rcu_grace_period_init,
++
++ TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
++ int grplo, int grphi, unsigned long qsmask),
++
++ TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(unsigned long, gpnum)
++ __field(u8, level)
++ __field(int, grplo)
++ __field(int, grphi)
++ __field(unsigned long, qsmask)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(gpnum, gpnum)
++ tp_assign(level, level)
++ tp_assign(grplo, grplo)
++ tp_assign(grphi, grphi)
++ tp_assign(qsmask, qsmask)
++ ),
++
++ TP_printk("%s %lu %u %d %d %lx",
++ __get_str(rcuname), __entry->gpnum, __entry->level,
++ __entry->grplo, __entry->grphi, __entry->qsmask)
++)
++
++/*
++ * Tracepoint for tasks blocking within preemptible-RCU read-side
++ * critical sections. Track the type of RCU (which one day might
++ * include SRCU), the grace-period number that the task is blocking
++ * (the current or the next), and the task's PID.
++ */
++TRACE_EVENT(rcu_preempt_task,
++
++ TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
++
++ TP_ARGS(rcuname, pid, gpnum),
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(unsigned long, gpnum)
++ __field(int, pid)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(gpnum, gpnum)
++ tp_assign(pid, pid)
++ ),
++
++ TP_printk("%s %lu %d",
++ __get_str(rcuname), __entry->gpnum, __entry->pid)
++)
++
++/*
++ * Tracepoint for tasks that blocked within a given preemptible-RCU
++ * read-side critical section exiting that critical section. Track the
++ * type of RCU (which one day might include SRCU) and the task's PID.
++ */
++TRACE_EVENT(rcu_unlock_preempted_task,
++
++ TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
++
++ TP_ARGS(rcuname, gpnum, pid),
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(unsigned long, gpnum)
++ __field(int, pid)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(gpnum, gpnum)
++ tp_assign(pid, pid)
++ ),
++
++ TP_printk("%s %lu %d", __get_str(rcuname), __entry->gpnum, __entry->pid)
++)
++
++/*
++ * Tracepoint for quiescent-state-reporting events. These are
++ * distinguished by the type of RCU, the grace-period number, the
++ * mask of quiescent lower-level entities, the rcu_node structure level,
++ * the starting and ending CPU covered by the rcu_node structure, and
++ * whether there are any blocked tasks blocking the current grace period.
++ * All but the type of RCU are extracted from the rcu_node structure.
++ */
++TRACE_EVENT(rcu_quiescent_state_report,
++
++ TP_PROTO(char *rcuname, unsigned long gpnum,
++ unsigned long mask, unsigned long qsmask,
++ u8 level, int grplo, int grphi, int gp_tasks),
++
++ TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(unsigned long, gpnum)
++ __field(unsigned long, mask)
++ __field(unsigned long, qsmask)
++ __field(u8, level)
++ __field(int, grplo)
++ __field(int, grphi)
++ __field(u8, gp_tasks)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(gpnum, gpnum)
++ tp_assign(mask, mask)
++ tp_assign(qsmask, qsmask)
++ tp_assign(level, level)
++ tp_assign(grplo, grplo)
++ tp_assign(grphi, grphi)
++ tp_assign(gp_tasks, gp_tasks)
++ ),
++
++ TP_printk("%s %lu %lx>%lx %u %d %d %u",
++ __get_str(rcuname), __entry->gpnum,
++ __entry->mask, __entry->qsmask, __entry->level,
++ __entry->grplo, __entry->grphi, __entry->gp_tasks)
++)
++
++/*
++ * Tracepoint for quiescent states detected by force_quiescent_state().
++ * These trace events include the type of RCU, the grace-period number
++ * that was blocked by the CPU, the CPU itself, and the type of quiescent
++ * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
++ * or "kick" when kicking a CPU that has been in dyntick-idle mode for
++ * too long.
++ */
++TRACE_EVENT(rcu_fqs,
++
++ TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
++
++ TP_ARGS(rcuname, gpnum, cpu, qsevent),
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(unsigned long, gpnum)
++ __field(int, cpu)
++ __string(qsevent, qsevent)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(gpnum, gpnum)
++ tp_assign(cpu, cpu)
++ tp_strcpy(qsevent, qsevent)
++ ),
++
++ TP_printk("%s %lu %d %s",
++ __get_str(rcuname), __entry->gpnum,
++ __entry->cpu, __get_str(qsevent))
++)
++
++#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */
++
++/*
++ * Tracepoint for dyntick-idle entry/exit events. These take a string
++ * as argument: "Start" for entering dyntick-idle mode, "End" for
++ * leaving it, "--=" for events moving towards idle, and "++=" for events
++ * moving away from idle. "Error on entry: not idle task" and "Error on
++ * exit: not idle task" indicate that a non-idle task is erroneously
++ * toying with the idle loop.
++ *
++ * These events also take a pair of numbers, which indicate the nesting
++ * depth before and after the event of interest. Note that task-related
++ * events use the upper bits of each number, while interrupt-related
++ * events use the lower bits.
++ */
++TRACE_EVENT(rcu_dyntick,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
++
++ TP_ARGS(polarity, oldnesting, newnesting),
++#else
++ TP_PROTO(char *polarity),
++
++ TP_ARGS(polarity),
++#endif
++
++ TP_STRUCT__entry(
++ __string(polarity, polarity)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ __field(long long, oldnesting)
++ __field(long long, newnesting)
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(polarity, polarity)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ tp_assign(oldnesting, oldnesting)
++ tp_assign(newnesting, newnesting)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ TP_printk("%s %llx %llx", __get_str(polarity),
++ __entry->oldnesting, __entry->newnesting)
++#else
++ TP_printk("%s", __get_str(polarity))
++#endif
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++/*
++ * Tracepoint for RCU preparation for idle, the goal being to get RCU
++ * processing done so that the current CPU can shut off its scheduling
++ * clock and enter dyntick-idle mode. One way to accomplish this is
++ * to drain all RCU callbacks from this CPU, and the other is to have
++ * done everything RCU requires for the current grace period. In this
++ * latter case, the CPU will be awakened at the end of the current grace
++ * period in order to process the remainder of its callbacks.
++ *
++ * These tracepoints take a string as argument:
++ *
++ * "No callbacks": Nothing to do, no callbacks on this CPU.
++ * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
++ * "Begin holdoff": Attempt failed, don't retry until next jiffy.
++ * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
++ * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
++ * "More callbacks": Still more callbacks, try again to clear them out.
++ * "Callbacks drained": All callbacks processed, off to dyntick idle!
++ * "Timer": Timer fired to cause CPU to continue processing callbacks.
++ * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
++ * "Cleanup after idle": Idle exited, timer canceled.
++ */
++TRACE_EVENT(rcu_prep_idle,
++
++ TP_PROTO(char *reason),
++
++ TP_ARGS(reason),
++
++ TP_STRUCT__entry(
++ __string(reason, reason)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(reason, reason)
++ ),
++
++ TP_printk("%s", __get_str(reason))
++)
++#endif
++
++/*
++ * Tracepoint for the registration of a single RCU callback function.
++ * The first argument is the type of RCU, the second argument is
++ * a pointer to the RCU callback itself, the third argument is the
++ * number of lazy callbacks queued, and the fourth argument is the
++ * total number of callbacks queued.
++ */
++TRACE_EVENT(rcu_callback,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
++ long qlen),
++
++ TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
++#else
++ TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
++
++ TP_ARGS(rcuname, rhp, qlen),
++#endif
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(void *, rhp)
++ __field(void *, func)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ __field(long, qlen_lazy)
++#endif
++ __field(long, qlen)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(rhp, rhp)
++ tp_assign(func, rhp->func)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ tp_assign(qlen_lazy, qlen_lazy)
++#endif
++ tp_assign(qlen, qlen)
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ TP_printk("%s rhp=%p func=%pf %ld/%ld",
++ __get_str(rcuname), __entry->rhp, __entry->func,
++ __entry->qlen_lazy, __entry->qlen)
++#else
++ TP_printk("%s rhp=%p func=%pf %ld",
++ __get_str(rcuname), __entry->rhp, __entry->func,
++ __entry->qlen)
++#endif
++)
++
++/*
++ * Tracepoint for the registration of a single RCU callback of the special
++ * kfree() form. The first argument is the RCU type, the second argument
++ * is a pointer to the RCU callback, the third argument is the offset
++ * of the callback within the enclosing RCU-protected data structure,
++ * the fourth argument is the number of lazy callbacks queued, and the
++ * fifth argument is the total number of callbacks queued.
++ */
++TRACE_EVENT(rcu_kfree_callback,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
++ long qlen_lazy, long qlen),
++
++ TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
++#else
++ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
++ long qlen),
++
++ TP_ARGS(rcuname, rhp, offset, qlen),
++#endif
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(void *, rhp)
++ __field(unsigned long, offset)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ __field(long, qlen_lazy)
++#endif
++ __field(long, qlen)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(rhp, rhp)
++ tp_assign(offset, offset)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ tp_assign(qlen_lazy, qlen_lazy)
++#endif
++ tp_assign(qlen, qlen)
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ TP_printk("%s rhp=%p func=%ld %ld/%ld",
++ __get_str(rcuname), __entry->rhp, __entry->offset,
++ __entry->qlen_lazy, __entry->qlen)
++#else
++ TP_printk("%s rhp=%p func=%ld %ld",
++ __get_str(rcuname), __entry->rhp, __entry->offset,
++ __entry->qlen)
++#endif
++)
++
++/*
++ * Tracepoint for marking the beginning of rcu_do_batch, performed to start
++ * RCU callback invocation. The first argument is the RCU flavor,
++ * the second is the number of lazy callbacks queued, the third is
++ * the total number of callbacks queued, and the fourth argument is
++ * the current RCU-callback batch limit.
++ */
++TRACE_EVENT(rcu_batch_start,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++ TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
++
++ TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
++
++ TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
++#else
++ TP_PROTO(char *rcuname, long qlen, int blimit),
++
++ TP_ARGS(rcuname, qlen, blimit),
++#endif
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ __field(long, qlen_lazy)
++#endif
++ __field(long, qlen)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++ __field(long, blimit)
++#else
++ __field(int, blimit)
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ tp_assign(qlen_lazy, qlen_lazy)
++#endif
++ tp_assign(qlen, qlen)
++ tp_assign(blimit, blimit)
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++ TP_printk("%s CBs=%ld/%ld bl=%ld",
++ __get_str(rcuname), __entry->qlen_lazy, __entry->qlen,
++ __entry->blimit)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++ TP_printk("%s CBs=%ld/%ld bl=%d",
++ __get_str(rcuname), __entry->qlen_lazy, __entry->qlen,
++ __entry->blimit)
++#else
++ TP_printk("%s CBs=%ld bl=%d",
++ __get_str(rcuname), __entry->qlen, __entry->blimit)
++#endif
++)
++
++/*
++ * Tracepoint for the invocation of a single RCU callback function.
++ * The first argument is the type of RCU, and the second argument is
++ * a pointer to the RCU callback itself.
++ */
++TRACE_EVENT(rcu_invoke_callback,
++
++ TP_PROTO(char *rcuname, struct rcu_head *rhp),
++
++ TP_ARGS(rcuname, rhp),
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(void *, rhp)
++ __field(void *, func)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(rhp, rhp)
++ tp_assign(func, rhp->func)
++ ),
++
++ TP_printk("%s rhp=%p func=%pf",
++ __get_str(rcuname), __entry->rhp, __entry->func)
++)
++
++/*
++ * Tracepoint for the invocation of a single RCU callback of the special
++ * kfree() form. The first argument is the RCU flavor, the second
++ * argument is a pointer to the RCU callback, and the third argument
++ * is the offset of the callback within the enclosing RCU-protected
++ * data structure.
++ */
++TRACE_EVENT(rcu_invoke_kfree_callback,
++
++ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
++
++ TP_ARGS(rcuname, rhp, offset),
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(void *, rhp)
++ __field(unsigned long, offset)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(rhp, rhp)
++ tp_assign(offset, offset)
++ ),
++
++ TP_printk("%s rhp=%p func=%ld",
++ __get_str(rcuname), __entry->rhp, __entry->offset)
++)
++
++/*
++ * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
++ * invoked. The first argument is the name of the RCU flavor,
++ * the second argument is number of callbacks actually invoked,
++ * the third argument (cb) is whether or not any of the callbacks that
++ * were ready to invoke at the beginning of this batch are still
++ * queued, the fourth argument (nr) is the return value of need_resched(),
++ * the fifth argument (iit) is 1 if the current task is the idle task,
++ * and the sixth argument (risk) is the return value from
++ * rcu_is_callbacks_kthread().
++ */
++TRACE_EVENT(rcu_batch_end,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ TP_PROTO(char *rcuname, int callbacks_invoked,
++ bool cb, bool nr, bool iit, bool risk),
++
++ TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
++#else
++ TP_PROTO(char *rcuname, int callbacks_invoked),
++
++ TP_ARGS(rcuname, callbacks_invoked),
++#endif
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __field(int, callbacks_invoked)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ __field(bool, cb)
++ __field(bool, nr)
++ __field(bool, iit)
++ __field(bool, risk)
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_assign(callbacks_invoked, callbacks_invoked)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ tp_assign(cb, cb)
++ tp_assign(nr, nr)
++ tp_assign(iit, iit)
++ tp_assign(risk, risk)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
++ __get_str(rcuname), __entry->callbacks_invoked,
++ __entry->cb ? 'C' : '.',
++ __entry->nr ? 'S' : '.',
++ __entry->iit ? 'I' : '.',
++ __entry->risk ? 'R' : '.')
++#else
++ TP_printk("%s CBs-invoked=%d",
++ __get_str(rcuname), __entry->callbacks_invoked)
++#endif
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++/*
++ * Tracepoint for rcutorture readers. The first argument is the name
++ * of the RCU flavor from rcutorture's viewpoint and the second argument
++ * is the callback address.
++ */
++TRACE_EVENT(rcu_torture_read,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++ TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
++ unsigned long secs, unsigned long c_old, unsigned long c),
++
++ TP_ARGS(rcutorturename, rhp, secs, c_old, c),
++#else
++ TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
++
++ TP_ARGS(rcutorturename, rhp),
++#endif
++
++ TP_STRUCT__entry(
++ __string(rcutorturename, rcutorturename)
++ __field(struct rcu_head *, rhp)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++ __field(unsigned long, secs)
++ __field(unsigned long, c_old)
++ __field(unsigned long, c)
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcutorturename, rcutorturename)
++ tp_assign(rhp, rhp)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++ tp_assign(secs, secs)
++ tp_assign(c_old, c_old)
++ tp_assign(c, c)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++ TP_printk("%s torture read %p %luus c: %lu %lu",
++ __get_str(rcutorturename), __entry->rhp,
++ __entry->secs, __entry->c_old, __entry->c)
++#else
++ TP_printk("%s torture read %p",
++ __get_str(rcutorturename), __entry->rhp)
++#endif
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
++/*
++ * Tracepoint for _rcu_barrier() execution. The string "s" describes
++ * the _rcu_barrier phase:
++ * "Begin": rcu_barrier_callback() started.
++ * "Check": rcu_barrier_callback() checking for piggybacking.
++ * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
++ * "Inc1": rcu_barrier_callback() piggyback check counter incremented.
++ * "Offline": rcu_barrier_callback() found offline CPU
++ * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
++ * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
++ * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
++ * "CB": An rcu_barrier_callback() invoked a callback, not the last.
++ * "LastCB": An rcu_barrier_callback() invoked the last callback.
++ * "Inc2": rcu_barrier_callback() piggyback check counter incremented.
++ * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
++ * is the count of remaining callbacks, and "done" is the piggybacking count.
++ */
++TRACE_EVENT(rcu_barrier,
++
++ TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
++
++ TP_ARGS(rcuname, s, cpu, cnt, done),
++
++ TP_STRUCT__entry(
++ __string(rcuname, rcuname)
++ __string(s, s)
++ __field(int, cpu)
++ __field(int, cnt)
++ __field(unsigned long, done)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(rcuname, rcuname)
++ tp_strcpy(s, s)
++ tp_assign(cpu, cpu)
++ tp_assign(cnt, cnt)
++ tp_assign(done, done)
++ ),
++
++ TP_printk("%s %s cpu %d remaining %d # %lu",
++ __get_str(rcuname), __get_str(s), __entry->cpu, __entry->cnt,
++ __entry->done)
++)
++#endif
++
++#else /* #ifdef CONFIG_RCU_TRACE */
++
++#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
++#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
++ qsmask) do { } while (0)
++#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
++#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
++#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
++ grplo, grphi, gp_tasks) do { } \
++ while (0)
++#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
++#else
++#define trace_rcu_dyntick(polarity) do { } while (0)
++#endif
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++#define trace_rcu_prep_idle(reason) do { } while (0)
++#endif
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
++#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
++ do { } while (0)
++#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
++ do { } while (0)
++#else
++#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
++#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
++#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
++#endif
++#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
++#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
++ do { } while (0)
++#else
++#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
++#endif
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
++ do { } while (0)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
++#endif
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
++#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
++#endif
++#endif /* #else #ifdef CONFIG_RCU_TRACE */
++
++#endif /* _TRACE_RCU_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/regmap.h
+@@ -0,0 +1,188 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM regmap
++
++#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_REGMAP_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++#ifndef _TRACE_REGMAP_DEF_
++#define _TRACE_REGMAP_DEF_
++struct device;
++struct regmap;
++#endif
++
++/*
++ * Log register events
++ */
++DECLARE_EVENT_CLASS(regmap_reg,
++
++ TP_PROTO(struct device *dev, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(dev, reg, val),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev) )
++ __field( unsigned int, reg )
++ __field( unsigned int, val )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, dev_name(dev))
++ tp_assign(reg, reg)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("%s reg=%x val=%x", __get_str(name),
++ (unsigned int)__entry->reg,
++ (unsigned int)__entry->val)
++)
++
++DEFINE_EVENT(regmap_reg, regmap_reg_write,
++
++ TP_PROTO(struct device *dev, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(dev, reg, val)
++
++)
++
++DEFINE_EVENT(regmap_reg, regmap_reg_read,
++
++ TP_PROTO(struct device *dev, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(dev, reg, val)
++
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
++
++ TP_PROTO(struct device *dev, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(dev, reg, val)
++
++)
++#endif
++
++DECLARE_EVENT_CLASS(regmap_block,
++
++ TP_PROTO(struct device *dev, unsigned int reg, int count),
++
++ TP_ARGS(dev, reg, count),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev) )
++ __field( unsigned int, reg )
++ __field( int, count )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, dev_name(dev))
++ tp_assign(reg, reg)
++ tp_assign(count, count)
++ ),
++
++ TP_printk("%s reg=%x count=%d", __get_str(name),
++ (unsigned int)__entry->reg,
++ (int)__entry->count)
++)
++
++DEFINE_EVENT(regmap_block, regmap_hw_read_start,
++
++ TP_PROTO(struct device *dev, unsigned int reg, int count),
++
++ TP_ARGS(dev, reg, count)
++)
++
++DEFINE_EVENT(regmap_block, regmap_hw_read_done,
++
++ TP_PROTO(struct device *dev, unsigned int reg, int count),
++
++ TP_ARGS(dev, reg, count)
++)
++
++DEFINE_EVENT(regmap_block, regmap_hw_write_start,
++
++ TP_PROTO(struct device *dev, unsigned int reg, int count),
++
++ TP_ARGS(dev, reg, count)
++)
++
++DEFINE_EVENT(regmap_block, regmap_hw_write_done,
++
++ TP_PROTO(struct device *dev, unsigned int reg, int count),
++
++ TP_ARGS(dev, reg, count)
++)
++
++TRACE_EVENT(regcache_sync,
++
++ TP_PROTO(struct device *dev, const char *type,
++ const char *status),
++
++ TP_ARGS(dev, type, status),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev) )
++ __string( status, status )
++ __string( type, type )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, dev_name(dev))
++ tp_strcpy(status, status)
++ tp_strcpy(type, type)
++ ),
++
++ TP_printk("%s type=%s status=%s", __get_str(name),
++ __get_str(type), __get_str(status))
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++DECLARE_EVENT_CLASS(regmap_bool,
++
++ TP_PROTO(struct device *dev, bool flag),
++
++ TP_ARGS(dev, flag),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev) )
++ __field( int, flag )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, dev_name(dev))
++ tp_assign(flag, flag)
++ ),
++
++ TP_printk("%s flag=%d", __get_str(name),
++ (int)__entry->flag)
++)
++
++DEFINE_EVENT(regmap_bool, regmap_cache_only,
++
++ TP_PROTO(struct device *dev, bool flag),
++
++ TP_ARGS(dev, flag)
++
++)
++
++DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
++
++ TP_PROTO(struct device *dev, bool flag),
++
++ TP_ARGS(dev, flag)
++
++)
++#endif
++
++#endif /* _TRACE_REGMAP_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/regulator.h
+@@ -0,0 +1,141 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM regulator
++
++#if !defined(_TRACE_REGULATOR_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_REGULATOR_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++
++/*
++ * Events which just log themselves and the regulator name for enable/disable
++ * type tracking.
++ */
++DECLARE_EVENT_CLASS(regulator_basic,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, name)
++ ),
++
++ TP_printk("name=%s", __get_str(name))
++
++)
++
++DEFINE_EVENT(regulator_basic, regulator_enable,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name)
++
++)
++
++DEFINE_EVENT(regulator_basic, regulator_enable_delay,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name)
++
++)
++
++DEFINE_EVENT(regulator_basic, regulator_enable_complete,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name)
++
++)
++
++DEFINE_EVENT(regulator_basic, regulator_disable,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name)
++
++)
++
++DEFINE_EVENT(regulator_basic, regulator_disable_complete,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name)
++
++)
++
++/*
++ * Events that take a range of numerical values, mostly for voltages
++ * and so on.
++ */
++DECLARE_EVENT_CLASS(regulator_range,
++
++ TP_PROTO(const char *name, int min, int max),
++
++ TP_ARGS(name, min, max),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ __field( int, min )
++ __field( int, max )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, name)
++ tp_assign(min, min)
++ tp_assign(max, max)
++ ),
++
++ TP_printk("name=%s (%d-%d)", __get_str(name),
++ (int)__entry->min, (int)__entry->max)
++)
++
++DEFINE_EVENT(regulator_range, regulator_set_voltage,
++
++ TP_PROTO(const char *name, int min, int max),
++
++ TP_ARGS(name, min, max)
++
++)
++
++
++/*
++ * Events that take a single value, mostly for readback and refcounts.
++ */
++DECLARE_EVENT_CLASS(regulator_value,
++
++ TP_PROTO(const char *name, unsigned int val),
++
++ TP_ARGS(name, val),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ __field( unsigned int, val )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, name)
++ tp_assign(val, val)
++ ),
++
++ TP_printk("name=%s, val=%u", __get_str(name),
++ (int)__entry->val)
++)
++
++DEFINE_EVENT(regulator_value, regulator_set_voltage_complete,
++
++ TP_PROTO(const char *name, unsigned int value),
++
++ TP_ARGS(name, value)
++
++)
++
++#endif /* _TRACE_REGULATOR_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/rpm.h
+@@ -0,0 +1,101 @@
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM rpm
++
++#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_RUNTIME_POWER_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++
++#ifndef _TRACE_RPM_DEF_
++#define _TRACE_RPM_DEF_
++struct device;
++#endif
++
++/*
++ * The rpm_internal events are used for tracing some important
++ * runtime pm internal functions.
++ */
++DECLARE_EVENT_CLASS(rpm_internal,
++
++ TP_PROTO(struct device *dev, int flags),
++
++ TP_ARGS(dev, flags),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev) )
++ __field( int, flags )
++ __field( int , usage_count )
++ __field( int , disable_depth )
++ __field( int , runtime_auto )
++ __field( int , request_pending )
++ __field( int , irq_safe )
++ __field( int , child_count )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, dev_name(dev))
++ tp_assign(flags, flags)
++ tp_assign(usage_count, atomic_read(&dev->power.usage_count))
++ tp_assign(disable_depth, dev->power.disable_depth)
++ tp_assign(runtime_auto, dev->power.runtime_auto)
++ tp_assign(request_pending, dev->power.request_pending)
++ tp_assign(irq_safe, dev->power.irq_safe)
++ tp_assign(child_count, atomic_read(&dev->power.child_count))
++ ),
++
++ TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
++ " irq-%-1d child-%d",
++ __get_str(name), __entry->flags,
++ __entry->usage_count,
++ __entry->disable_depth,
++ __entry->runtime_auto,
++ __entry->request_pending,
++ __entry->irq_safe,
++ __entry->child_count
++ )
++)
++DEFINE_EVENT(rpm_internal, rpm_suspend,
++
++ TP_PROTO(struct device *dev, int flags),
++
++ TP_ARGS(dev, flags)
++)
++DEFINE_EVENT(rpm_internal, rpm_resume,
++
++ TP_PROTO(struct device *dev, int flags),
++
++ TP_ARGS(dev, flags)
++)
++DEFINE_EVENT(rpm_internal, rpm_idle,
++
++ TP_PROTO(struct device *dev, int flags),
++
++ TP_ARGS(dev, flags)
++)
++
++TRACE_EVENT(rpm_return_int,
++ TP_PROTO(struct device *dev, unsigned long ip, int ret),
++ TP_ARGS(dev, ip, ret),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev))
++ __field( unsigned long, ip )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, dev_name(dev))
++ tp_assign(ip, ip)
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
++ __entry->ret)
++)
++
++#endif /* _TRACE_RUNTIME_POWER_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/sched.h
+@@ -0,0 +1,560 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM sched
++
++#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SCHED_H
++
++#include <linux/sched.h>
++#include <linux/tracepoint.h>
++#include <linux/binfmts.h>
++#include <linux/version.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++#include <linux/sched/rt.h>
++#endif
++
++#ifndef _TRACE_SCHED_DEF_
++#define _TRACE_SCHED_DEF_
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++
++static inline long __trace_sched_switch_state(struct task_struct *p)
++{
++ long state = p->state;
++
++#ifdef CONFIG_PREEMPT
++ /*
++ * For all intents and purposes a preempted task is a running task.
++ */
++ if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++ state = TASK_RUNNING | TASK_STATE_MAX;
++#else
++ state = TASK_RUNNING;
++#endif
++#endif
++
++ return state;
++}
++
++#endif
++
++#endif /* _TRACE_SCHED_DEF_ */
++
++/*
++ * Tracepoint for calling kthread_stop, performed to end a kthread:
++ */
++TRACE_EVENT(sched_kthread_stop,
++
++ TP_PROTO(struct task_struct *t),
++
++ TP_ARGS(t),
++
++ TP_STRUCT__entry(
++ __array_text( char, comm, TASK_COMM_LEN )
++ __field( pid_t, tid )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(comm, t->comm, TASK_COMM_LEN)
++ tp_assign(tid, t->pid)
++ ),
++
++ TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
++)
++
++/*
++ * Tracepoint for the return value of the kthread stopping:
++ */
++TRACE_EVENT(sched_kthread_stop_ret,
++
++ TP_PROTO(int ret),
++
++ TP_ARGS(ret),
++
++ TP_STRUCT__entry(
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("ret=%d", __entry->ret)
++)
++
++/*
++ * Tracepoint for waking up a task:
++ */
++DECLARE_EVENT_CLASS(sched_wakeup_template,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ TP_PROTO(struct task_struct *p, int success),
++
++ TP_ARGS(p, success),
++#else
++ TP_PROTO(struct rq *rq, struct task_struct *p, int success),
++
++ TP_ARGS(rq, p, success),
++#endif
++
++ TP_STRUCT__entry(
++ __array_text( char, comm, TASK_COMM_LEN )
++ __field( pid_t, tid )
++ __field( int, prio )
++ __field( int, success )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
++ __field( int, target_cpu )
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(comm, p->comm, TASK_COMM_LEN)
++ tp_assign(tid, p->pid)
++ tp_assign(prio, p->prio)
++ tp_assign(success, success)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
++ tp_assign(target_cpu, task_cpu(p))
++#endif
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
++ )
++ TP_perf_assign(
++ __perf_task(p)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
++ TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
++ __entry->comm, __entry->tid, __entry->prio,
++ __entry->success, __entry->target_cpu)
++#else
++ TP_printk("comm=%s tid=%d prio=%d success=%d",
++ __entry->comm, __entry->tid, __entry->prio,
++ __entry->success)
++#endif
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++
++DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
++ TP_PROTO(struct task_struct *p, int success),
++ TP_ARGS(p, success))
++
++/*
++ * Tracepoint for waking up a new task:
++ */
++DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
++ TP_PROTO(struct task_struct *p, int success),
++ TP_ARGS(p, success))
++
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
++
++DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
++ TP_PROTO(struct rq *rq, struct task_struct *p, int success),
++ TP_ARGS(rq, p, success))
++
++/*
++ * Tracepoint for waking up a new task:
++ */
++DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
++ TP_PROTO(struct rq *rq, struct task_struct *p, int success),
++ TP_ARGS(rq, p, success))
++
++#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
++
++/*
++ * Tracepoint for task switches, performed by the scheduler:
++ */
++TRACE_EVENT(sched_switch,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ TP_PROTO(struct task_struct *prev,
++ struct task_struct *next),
++
++ TP_ARGS(prev, next),
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
++ TP_PROTO(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next),
++
++ TP_ARGS(rq, prev, next),
++#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
++
++ TP_STRUCT__entry(
++ __array_text( char, prev_comm, TASK_COMM_LEN )
++ __field( pid_t, prev_tid )
++ __field( int, prev_prio )
++ __field( long, prev_state )
++ __array_text( char, next_comm, TASK_COMM_LEN )
++ __field( pid_t, next_tid )
++ __field( int, next_prio )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
++ tp_assign(prev_tid, prev->pid)
++ tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++ tp_assign(prev_state, __trace_sched_switch_state(prev))
++#else
++ tp_assign(prev_state, prev->state)
++#endif
++ tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
++ tp_assign(next_tid, next->pid)
++ tp_assign(next_prio, next->prio - MAX_RT_PRIO)
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++ TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_tid=%d next_prio=%d",
++ __entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
++ __entry->prev_state & (TASK_STATE_MAX-1) ?
++ __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
++ { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
++ { 16, "Z" }, { 32, "X" }, { 64, "x" },
++ { 128, "W" }) : "R",
++ __entry->prev_state & TASK_STATE_MAX ? "+" : "",
++ __entry->next_comm, __entry->next_tid, __entry->next_prio)
++#else
++ TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
++ __entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
++ __entry->prev_state ?
++ __print_flags(__entry->prev_state, "|",
++ { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
++ { 16, "Z" }, { 32, "X" }, { 64, "x" },
++ { 128, "W" }) : "R",
++ __entry->next_comm, __entry->next_tid, __entry->next_prio)
++#endif
++)
++
++/*
++ * Tracepoint for a task being migrated:
++ */
++TRACE_EVENT(sched_migrate_task,
++
++ TP_PROTO(struct task_struct *p, int dest_cpu),
++
++ TP_ARGS(p, dest_cpu),
++
++ TP_STRUCT__entry(
++ __array_text( char, comm, TASK_COMM_LEN )
++ __field( pid_t, tid )
++ __field( int, prio )
++ __field( int, orig_cpu )
++ __field( int, dest_cpu )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(comm, p->comm, TASK_COMM_LEN)
++ tp_assign(tid, p->pid)
++ tp_assign(prio, p->prio - MAX_RT_PRIO)
++ tp_assign(orig_cpu, task_cpu(p))
++ tp_assign(dest_cpu, dest_cpu)
++ ),
++
++ TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
++ __entry->comm, __entry->tid, __entry->prio,
++ __entry->orig_cpu, __entry->dest_cpu)
++)
++
++DECLARE_EVENT_CLASS(sched_process_template,
++
++ TP_PROTO(struct task_struct *p),
++
++ TP_ARGS(p),
++
++ TP_STRUCT__entry(
++ __array_text( char, comm, TASK_COMM_LEN )
++ __field( pid_t, tid )
++ __field( int, prio )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(comm, p->comm, TASK_COMM_LEN)
++ tp_assign(tid, p->pid)
++ tp_assign(prio, p->prio - MAX_RT_PRIO)
++ ),
++
++ TP_printk("comm=%s tid=%d prio=%d",
++ __entry->comm, __entry->tid, __entry->prio)
++)
++
++/*
++ * Tracepoint for freeing a task:
++ */
++DEFINE_EVENT(sched_process_template, sched_process_free,
++ TP_PROTO(struct task_struct *p),
++ TP_ARGS(p))
++
++
++/*
++ * Tracepoint for a task exiting:
++ */
++DEFINE_EVENT(sched_process_template, sched_process_exit,
++ TP_PROTO(struct task_struct *p),
++ TP_ARGS(p))
++
++/*
++ * Tracepoint for waiting on task to unschedule:
++ */
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++DEFINE_EVENT(sched_process_template, sched_wait_task,
++ TP_PROTO(struct task_struct *p),
++ TP_ARGS(p))
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
++DEFINE_EVENT(sched_process_template, sched_wait_task,
++ TP_PROTO(struct rq *rq, struct task_struct *p),
++ TP_ARGS(rq, p))
++#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
++
++/*
++ * Tracepoint for a waiting task:
++ */
++TRACE_EVENT(sched_process_wait,
++
++ TP_PROTO(struct pid *pid),
++
++ TP_ARGS(pid),
++
++ TP_STRUCT__entry(
++ __array_text( char, comm, TASK_COMM_LEN )
++ __field( pid_t, tid )
++ __field( int, prio )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(comm, current->comm, TASK_COMM_LEN)
++ tp_assign(tid, pid_nr(pid))
++ tp_assign(prio, current->prio - MAX_RT_PRIO)
++ ),
++
++ TP_printk("comm=%s tid=%d prio=%d",
++ __entry->comm, __entry->tid, __entry->prio)
++)
++
++/*
++ * Tracepoint for do_fork.
++ * Saving both TID and PID information, especially for the child, allows
++ * trace analyzers to distinguish between creation of a new process and
++ * creation of a new thread. Newly created processes will have child_tid
++ * == child_pid, while creation of a thread yields child_tid !=
++ * child_pid.
++ */
++TRACE_EVENT(sched_process_fork,
++
++ TP_PROTO(struct task_struct *parent, struct task_struct *child),
++
++ TP_ARGS(parent, child),
++
++ TP_STRUCT__entry(
++ __array_text( char, parent_comm, TASK_COMM_LEN )
++ __field( pid_t, parent_tid )
++ __field( pid_t, parent_pid )
++ __array_text( char, child_comm, TASK_COMM_LEN )
++ __field( pid_t, child_tid )
++ __field( pid_t, child_pid )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
++ tp_assign(parent_tid, parent->pid)
++ tp_assign(parent_pid, parent->tgid)
++ tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
++ tp_assign(child_tid, child->pid)
++ tp_assign(child_pid, child->tgid)
++ ),
++
++ TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
++ __entry->parent_comm, __entry->parent_tid,
++ __entry->child_comm, __entry->child_tid)
++)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
++/*
++ * Tracepoint for sending a signal:
++ */
++TRACE_EVENT(sched_signal_send,
++
++ TP_PROTO(int sig, struct task_struct *p),
++
++ TP_ARGS(sig, p),
++
++ TP_STRUCT__entry(
++ __field( int, sig )
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(comm, p->comm, TASK_COMM_LEN)
++ tp_assign(pid, p->pid)
++ tp_assign(sig, sig)
++ ),
++
++ TP_printk("sig=%d comm=%s pid=%d",
++ __entry->sig, __entry->comm, __entry->pid)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++/*
++ * Tracepoint for exec:
++ */
++TRACE_EVENT(sched_process_exec,
++
++ TP_PROTO(struct task_struct *p, pid_t old_pid,
++ struct linux_binprm *bprm),
++
++ TP_ARGS(p, old_pid, bprm),
++
++ TP_STRUCT__entry(
++ __string( filename, bprm->filename )
++ __field( pid_t, tid )
++ __field( pid_t, old_tid )
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(filename, bprm->filename)
++ tp_assign(tid, p->pid)
++ tp_assign(old_tid, old_pid)
++ ),
++
++ TP_printk("filename=%s tid=%d old_tid=%d", __get_str(filename),
++ __entry->tid, __entry->old_tid)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
++/*
++ * XXX the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
++ * adding sched_stat support to SCHED_FIFO/RR would be welcome.
++ */
++DECLARE_EVENT_CLASS(sched_stat_template,
++
++ TP_PROTO(struct task_struct *tsk, u64 delay),
++
++ TP_ARGS(tsk, delay),
++
++ TP_STRUCT__entry(
++ __array_text( char, comm, TASK_COMM_LEN )
++ __field( pid_t, tid )
++ __field( u64, delay )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
++ tp_assign(tid, tsk->pid)
++ tp_assign(delay, delay)
++ )
++ TP_perf_assign(
++ __perf_count(delay)
++ ),
++
++ TP_printk("comm=%s tid=%d delay=%Lu [ns]",
++ __entry->comm, __entry->tid,
++ (unsigned long long)__entry->delay)
++)
++
++
++/*
++ * Tracepoint for accounting wait time (time the task is runnable
++ * but not actually running due to scheduler contention).
++ */
++DEFINE_EVENT(sched_stat_template, sched_stat_wait,
++ TP_PROTO(struct task_struct *tsk, u64 delay),
++ TP_ARGS(tsk, delay))
++
++/*
++ * Tracepoint for accounting sleep time (time the task is not runnable,
++ * including iowait, see below).
++ */
++DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
++ TP_PROTO(struct task_struct *tsk, u64 delay),
++ TP_ARGS(tsk, delay))
++
++/*
++ * Tracepoint for accounting iowait time (time the task is not runnable
++ * due to waiting on IO to complete).
++ */
++DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
++ TP_PROTO(struct task_struct *tsk, u64 delay),
++ TP_ARGS(tsk, delay))
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++/*
++ * Tracepoint for accounting blocked time (time the task is in uninterruptible sleep).
++ */
++DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
++ TP_PROTO(struct task_struct *tsk, u64 delay),
++ TP_ARGS(tsk, delay))
++#endif
++
++/*
++ * Tracepoint for accounting runtime (time the task is executing
++ * on a CPU).
++ */
++TRACE_EVENT(sched_stat_runtime,
++
++ TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
++
++ TP_ARGS(tsk, runtime, vruntime),
++
++ TP_STRUCT__entry(
++ __array_text( char, comm, TASK_COMM_LEN )
++ __field( pid_t, tid )
++ __field( u64, runtime )
++ __field( u64, vruntime )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
++ tp_assign(tid, tsk->pid)
++ tp_assign(runtime, runtime)
++ tp_assign(vruntime, vruntime)
++ )
++ TP_perf_assign(
++ __perf_count(runtime)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
++ __perf_task(tsk)
++#endif
++ ),
++
++ TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
++ __entry->comm, __entry->tid,
++ (unsigned long long)__entry->runtime,
++ (unsigned long long)__entry->vruntime)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++/*
++ * Tracepoint for showing priority inheritance modifying a task's
++ * priority.
++ */
++TRACE_EVENT(sched_pi_setprio,
++
++ TP_PROTO(struct task_struct *tsk, int newprio),
++
++ TP_ARGS(tsk, newprio),
++
++ TP_STRUCT__entry(
++ __array_text( char, comm, TASK_COMM_LEN )
++ __field( pid_t, tid )
++ __field( int, oldprio )
++ __field( int, newprio )
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
++ tp_assign(tid, tsk->pid)
++ tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
++ tp_assign(newprio, newprio - MAX_RT_PRIO)
++ ),
++
++ TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
++ __entry->comm, __entry->tid,
++ __entry->oldprio, __entry->newprio)
++)
++#endif
++
++#endif /* _TRACE_SCHED_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/scsi.h
+@@ -0,0 +1,406 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM scsi
++
++#if !defined(_TRACE_SCSI_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SCSI_H
++
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_host.h>
++#include <linux/tracepoint.h>
++#include <linux/trace_seq.h>
++#include <linux/version.h>
++
++#ifndef _TRACE_SCSI_DEF
++#define _TRACE_SCSI_DEF
++
++#define scsi_opcode_name(opcode) { opcode, #opcode }
++#define show_opcode_name(val) \
++ __print_symbolic(val, \
++ scsi_opcode_name(TEST_UNIT_READY), \
++ scsi_opcode_name(REZERO_UNIT), \
++ scsi_opcode_name(REQUEST_SENSE), \
++ scsi_opcode_name(FORMAT_UNIT), \
++ scsi_opcode_name(READ_BLOCK_LIMITS), \
++ scsi_opcode_name(REASSIGN_BLOCKS), \
++ scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \
++ scsi_opcode_name(READ_6), \
++ scsi_opcode_name(WRITE_6), \
++ scsi_opcode_name(SEEK_6), \
++ scsi_opcode_name(READ_REVERSE), \
++ scsi_opcode_name(WRITE_FILEMARKS), \
++ scsi_opcode_name(SPACE), \
++ scsi_opcode_name(INQUIRY), \
++ scsi_opcode_name(RECOVER_BUFFERED_DATA), \
++ scsi_opcode_name(MODE_SELECT), \
++ scsi_opcode_name(RESERVE), \
++ scsi_opcode_name(RELEASE), \
++ scsi_opcode_name(COPY), \
++ scsi_opcode_name(ERASE), \
++ scsi_opcode_name(MODE_SENSE), \
++ scsi_opcode_name(START_STOP), \
++ scsi_opcode_name(RECEIVE_DIAGNOSTIC), \
++ scsi_opcode_name(SEND_DIAGNOSTIC), \
++ scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \
++ scsi_opcode_name(SET_WINDOW), \
++ scsi_opcode_name(READ_CAPACITY), \
++ scsi_opcode_name(READ_10), \
++ scsi_opcode_name(WRITE_10), \
++ scsi_opcode_name(SEEK_10), \
++ scsi_opcode_name(POSITION_TO_ELEMENT), \
++ scsi_opcode_name(WRITE_VERIFY), \
++ scsi_opcode_name(VERIFY), \
++ scsi_opcode_name(SEARCH_HIGH), \
++ scsi_opcode_name(SEARCH_EQUAL), \
++ scsi_opcode_name(SEARCH_LOW), \
++ scsi_opcode_name(SET_LIMITS), \
++ scsi_opcode_name(PRE_FETCH), \
++ scsi_opcode_name(READ_POSITION), \
++ scsi_opcode_name(SYNCHRONIZE_CACHE), \
++ scsi_opcode_name(LOCK_UNLOCK_CACHE), \
++ scsi_opcode_name(READ_DEFECT_DATA), \
++ scsi_opcode_name(MEDIUM_SCAN), \
++ scsi_opcode_name(COMPARE), \
++ scsi_opcode_name(COPY_VERIFY), \
++ scsi_opcode_name(WRITE_BUFFER), \
++ scsi_opcode_name(READ_BUFFER), \
++ scsi_opcode_name(UPDATE_BLOCK), \
++ scsi_opcode_name(READ_LONG), \
++ scsi_opcode_name(WRITE_LONG), \
++ scsi_opcode_name(CHANGE_DEFINITION), \
++ scsi_opcode_name(WRITE_SAME), \
++ scsi_opcode_name(UNMAP), \
++ scsi_opcode_name(READ_TOC), \
++ scsi_opcode_name(LOG_SELECT), \
++ scsi_opcode_name(LOG_SENSE), \
++ scsi_opcode_name(XDWRITEREAD_10), \
++ scsi_opcode_name(MODE_SELECT_10), \
++ scsi_opcode_name(RESERVE_10), \
++ scsi_opcode_name(RELEASE_10), \
++ scsi_opcode_name(MODE_SENSE_10), \
++ scsi_opcode_name(PERSISTENT_RESERVE_IN), \
++ scsi_opcode_name(PERSISTENT_RESERVE_OUT), \
++ scsi_opcode_name(VARIABLE_LENGTH_CMD), \
++ scsi_opcode_name(REPORT_LUNS), \
++ scsi_opcode_name(MAINTENANCE_IN), \
++ scsi_opcode_name(MAINTENANCE_OUT), \
++ scsi_opcode_name(MOVE_MEDIUM), \
++ scsi_opcode_name(EXCHANGE_MEDIUM), \
++ scsi_opcode_name(READ_12), \
++ scsi_opcode_name(WRITE_12), \
++ scsi_opcode_name(WRITE_VERIFY_12), \
++ scsi_opcode_name(SEARCH_HIGH_12), \
++ scsi_opcode_name(SEARCH_EQUAL_12), \
++ scsi_opcode_name(SEARCH_LOW_12), \
++ scsi_opcode_name(READ_ELEMENT_STATUS), \
++ scsi_opcode_name(SEND_VOLUME_TAG), \
++ scsi_opcode_name(WRITE_LONG_2), \
++ scsi_opcode_name(READ_16), \
++ scsi_opcode_name(WRITE_16), \
++ scsi_opcode_name(VERIFY_16), \
++ scsi_opcode_name(WRITE_SAME_16), \
++ scsi_opcode_name(SERVICE_ACTION_IN), \
++ scsi_opcode_name(SAI_READ_CAPACITY_16), \
++ scsi_opcode_name(SAI_GET_LBA_STATUS), \
++ scsi_opcode_name(MI_REPORT_TARGET_PGS), \
++ scsi_opcode_name(MO_SET_TARGET_PGS), \
++ scsi_opcode_name(READ_32), \
++ scsi_opcode_name(WRITE_32), \
++ scsi_opcode_name(WRITE_SAME_32), \
++ scsi_opcode_name(ATA_16), \
++ scsi_opcode_name(ATA_12))
++
++#define scsi_hostbyte_name(result) { result, #result }
++#define show_hostbyte_name(val) \
++ __print_symbolic(val, \
++ scsi_hostbyte_name(DID_OK), \
++ scsi_hostbyte_name(DID_NO_CONNECT), \
++ scsi_hostbyte_name(DID_BUS_BUSY), \
++ scsi_hostbyte_name(DID_TIME_OUT), \
++ scsi_hostbyte_name(DID_BAD_TARGET), \
++ scsi_hostbyte_name(DID_ABORT), \
++ scsi_hostbyte_name(DID_PARITY), \
++ scsi_hostbyte_name(DID_ERROR), \
++ scsi_hostbyte_name(DID_RESET), \
++ scsi_hostbyte_name(DID_BAD_INTR), \
++ scsi_hostbyte_name(DID_PASSTHROUGH), \
++ scsi_hostbyte_name(DID_SOFT_ERROR), \
++ scsi_hostbyte_name(DID_IMM_RETRY), \
++ scsi_hostbyte_name(DID_REQUEUE), \
++ scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \
++ scsi_hostbyte_name(DID_TRANSPORT_FAILFAST))
++
++#define scsi_driverbyte_name(result) { result, #result }
++#define show_driverbyte_name(val) \
++ __print_symbolic(val, \
++ scsi_driverbyte_name(DRIVER_OK), \
++ scsi_driverbyte_name(DRIVER_BUSY), \
++ scsi_driverbyte_name(DRIVER_SOFT), \
++ scsi_driverbyte_name(DRIVER_MEDIA), \
++ scsi_driverbyte_name(DRIVER_ERROR), \
++ scsi_driverbyte_name(DRIVER_INVALID), \
++ scsi_driverbyte_name(DRIVER_TIMEOUT), \
++ scsi_driverbyte_name(DRIVER_HARD), \
++ scsi_driverbyte_name(DRIVER_SENSE))
++
++#define scsi_msgbyte_name(result) { result, #result }
++#define show_msgbyte_name(val) \
++ __print_symbolic(val, \
++ scsi_msgbyte_name(COMMAND_COMPLETE), \
++ scsi_msgbyte_name(EXTENDED_MESSAGE), \
++ scsi_msgbyte_name(SAVE_POINTERS), \
++ scsi_msgbyte_name(RESTORE_POINTERS), \
++ scsi_msgbyte_name(DISCONNECT), \
++ scsi_msgbyte_name(INITIATOR_ERROR), \
++ scsi_msgbyte_name(ABORT_TASK_SET), \
++ scsi_msgbyte_name(MESSAGE_REJECT), \
++ scsi_msgbyte_name(NOP), \
++ scsi_msgbyte_name(MSG_PARITY_ERROR), \
++ scsi_msgbyte_name(LINKED_CMD_COMPLETE), \
++ scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \
++ scsi_msgbyte_name(TARGET_RESET), \
++ scsi_msgbyte_name(ABORT_TASK), \
++ scsi_msgbyte_name(CLEAR_TASK_SET), \
++ scsi_msgbyte_name(INITIATE_RECOVERY), \
++ scsi_msgbyte_name(RELEASE_RECOVERY), \
++ scsi_msgbyte_name(CLEAR_ACA), \
++ scsi_msgbyte_name(LOGICAL_UNIT_RESET), \
++ scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \
++ scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \
++ scsi_msgbyte_name(ORDERED_QUEUE_TAG), \
++ scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \
++ scsi_msgbyte_name(ACA), \
++ scsi_msgbyte_name(QAS_REQUEST), \
++ scsi_msgbyte_name(BUS_DEVICE_RESET), \
++ scsi_msgbyte_name(ABORT))
++
++#define scsi_statusbyte_name(result) { result, #result }
++#define show_statusbyte_name(val) \
++ __print_symbolic(val, \
++ scsi_statusbyte_name(SAM_STAT_GOOD), \
++ scsi_statusbyte_name(SAM_STAT_CHECK_CONDITION), \
++ scsi_statusbyte_name(SAM_STAT_CONDITION_MET), \
++ scsi_statusbyte_name(SAM_STAT_BUSY), \
++ scsi_statusbyte_name(SAM_STAT_INTERMEDIATE), \
++ scsi_statusbyte_name(SAM_STAT_INTERMEDIATE_CONDITION_MET), \
++ scsi_statusbyte_name(SAM_STAT_RESERVATION_CONFLICT), \
++ scsi_statusbyte_name(SAM_STAT_COMMAND_TERMINATED), \
++ scsi_statusbyte_name(SAM_STAT_TASK_SET_FULL), \
++ scsi_statusbyte_name(SAM_STAT_ACA_ACTIVE), \
++ scsi_statusbyte_name(SAM_STAT_TASK_ABORTED))
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++#define scsi_prot_op_name(result) { result, #result }
++#define show_prot_op_name(val) \
++ __print_symbolic(val, \
++ scsi_prot_op_name(SCSI_PROT_NORMAL), \
++ scsi_prot_op_name(SCSI_PROT_READ_INSERT), \
++ scsi_prot_op_name(SCSI_PROT_WRITE_STRIP), \
++ scsi_prot_op_name(SCSI_PROT_READ_STRIP), \
++ scsi_prot_op_name(SCSI_PROT_WRITE_INSERT), \
++ scsi_prot_op_name(SCSI_PROT_READ_PASS), \
++ scsi_prot_op_name(SCSI_PROT_WRITE_PASS))
++#endif
++
++const char *scsi_trace_parse_cdb(struct trace_seq*, unsigned char*, int);
++#define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len)
++#endif
++
++TRACE_EVENT(scsi_dispatch_cmd_start,
++
++ TP_PROTO(struct scsi_cmnd *cmd),
++
++ TP_ARGS(cmd),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, host_no )
++ __field( unsigned int, channel )
++ __field( unsigned int, id )
++ __field( unsigned int, lun )
++ __field( unsigned int, opcode )
++ __field( unsigned int, cmd_len )
++ __field( unsigned int, data_sglen )
++ __field( unsigned int, prot_sglen )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ __field( unsigned char, prot_op )
++#endif
++ __dynamic_array_hex(unsigned char, cmnd, cmd->cmd_len)
++ ),
++
++ TP_fast_assign(
++ tp_assign(host_no, cmd->device->host->host_no)
++ tp_assign(channel, cmd->device->channel)
++ tp_assign(id, cmd->device->id)
++ tp_assign(lun, cmd->device->lun)
++ tp_assign(opcode, cmd->cmnd[0])
++ tp_assign(cmd_len, cmd->cmd_len)
++ tp_assign(data_sglen, scsi_sg_count(cmd))
++ tp_assign(prot_sglen, scsi_prot_sg_count(cmd))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ tp_assign(prot_op, scsi_get_prot_op(cmd))
++#endif
++ tp_memcpy_dyn(cmnd, cmd->cmnd)
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
++ " prot_op=%s cmnd=(%s %s raw=%s)",
++#else
++ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
++ " cmnd=(%s %s raw=%s)",
++#endif
++ __entry->host_no, __entry->channel, __entry->id,
++ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ show_prot_op_name(__entry->prot_op),
++#endif
++ show_opcode_name(__entry->opcode),
++ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
++ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
++)
++
++TRACE_EVENT(scsi_dispatch_cmd_error,
++
++ TP_PROTO(struct scsi_cmnd *cmd, int rtn),
++
++ TP_ARGS(cmd, rtn),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, host_no )
++ __field( unsigned int, channel )
++ __field( unsigned int, id )
++ __field( unsigned int, lun )
++ __field( int, rtn )
++ __field( unsigned int, opcode )
++ __field( unsigned int, cmd_len )
++ __field( unsigned int, data_sglen )
++ __field( unsigned int, prot_sglen )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ __field( unsigned char, prot_op )
++#endif
++ __dynamic_array_hex(unsigned char, cmnd, cmd->cmd_len)
++ ),
++
++ TP_fast_assign(
++ tp_assign(host_no, cmd->device->host->host_no)
++ tp_assign(channel, cmd->device->channel)
++ tp_assign(id, cmd->device->id)
++ tp_assign(lun, cmd->device->lun)
++ tp_assign(rtn, rtn)
++ tp_assign(opcode, cmd->cmnd[0])
++ tp_assign(cmd_len, cmd->cmd_len)
++ tp_assign(data_sglen, scsi_sg_count(cmd))
++ tp_assign(prot_sglen, scsi_prot_sg_count(cmd))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ tp_assign(prot_op, scsi_get_prot_op(cmd))
++#endif
++ tp_memcpy_dyn(cmnd, cmd->cmnd)
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
++ " prot_op=%s cmnd=(%s %s raw=%s) rtn=%d",
++#else
++ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
++ " cmnd=(%s %s raw=%s) rtn=%d",
++#endif
++ __entry->host_no, __entry->channel, __entry->id,
++ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ show_prot_op_name(__entry->prot_op),
++#endif
++ show_opcode_name(__entry->opcode),
++ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
++ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
++ __entry->rtn)
++)
++
++DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
++
++ TP_PROTO(struct scsi_cmnd *cmd),
++
++ TP_ARGS(cmd),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, host_no )
++ __field( unsigned int, channel )
++ __field( unsigned int, id )
++ __field( unsigned int, lun )
++ __field( int, result )
++ __field( unsigned int, opcode )
++ __field( unsigned int, cmd_len )
++ __field( unsigned int, data_sglen )
++ __field( unsigned int, prot_sglen )
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ __field( unsigned char, prot_op )
++#endif
++ __dynamic_array_hex(unsigned char, cmnd, cmd->cmd_len)
++ ),
++
++ TP_fast_assign(
++ tp_assign(host_no, cmd->device->host->host_no)
++ tp_assign(channel, cmd->device->channel)
++ tp_assign(id, cmd->device->id)
++ tp_assign(lun, cmd->device->lun)
++ tp_assign(result, cmd->result)
++ tp_assign(opcode, cmd->cmnd[0])
++ tp_assign(cmd_len, cmd->cmd_len)
++ tp_assign(data_sglen, scsi_sg_count(cmd))
++ tp_assign(prot_sglen, scsi_prot_sg_count(cmd))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ tp_assign(prot_op, scsi_get_prot_op(cmd))
++#endif
++ tp_memcpy_dyn(cmnd, cmd->cmnd)
++ ),
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \
++ "prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \
++ "%s host=%s message=%s status=%s)",
++#else
++ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \
++ "prot_sgl=%u cmnd=(%s %s raw=%s) result=(driver=%s host=%s " \
++ "message=%s status=%s)",
++#endif
++ __entry->host_no, __entry->channel, __entry->id,
++ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
++ show_prot_op_name(__entry->prot_op),
++#endif
++ show_opcode_name(__entry->opcode),
++ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
++ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
++ show_driverbyte_name(((__entry->result) >> 24) & 0xff),
++ show_hostbyte_name(((__entry->result) >> 16) & 0xff),
++ show_msgbyte_name(((__entry->result) >> 8) & 0xff),
++ show_statusbyte_name(__entry->result & 0xff))
++)
++
++DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done,
++ TP_PROTO(struct scsi_cmnd *cmd),
++ TP_ARGS(cmd))
++
++DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_timeout,
++ TP_PROTO(struct scsi_cmnd *cmd),
++ TP_ARGS(cmd))
++
++TRACE_EVENT(scsi_eh_wakeup,
++
++ TP_PROTO(struct Scsi_Host *shost),
++
++ TP_ARGS(shost),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, host_no )
++ ),
++
++ TP_fast_assign(
++ tp_assign(host_no, shost->host_no)
++ ),
++
++ TP_printk("host_no=%u", __entry->host_no)
++)
++
++#endif /* _TRACE_SCSI_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
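
The result decoding in the scsi_cmd_done_timeout_template above follows the classic SCSI result layout: driver byte in bits 31-24, host byte in 23-16, message byte in 15-8, status byte in 7-0. A stand-alone sketch of that split (struct and helper names are illustrative):

/* Sketch: decomposing cmd->result the same way the TP_printk above does. */
struct scsi_result_bytes {
	unsigned char driver;	/* DRIVER_*   symbolic names */
	unsigned char host;	/* DID_*      symbolic names */
	unsigned char msg;	/* message byte               */
	unsigned char status;	/* SAM_STAT_* symbolic names  */
};

static inline struct scsi_result_bytes split_scsi_result(int result)
{
	struct scsi_result_bytes b = {
		.driver	= (result >> 24) & 0xff,
		.host	= (result >> 16) & 0xff,
		.msg	= (result >> 8) & 0xff,
		.status	= result & 0xff,
	};

	return b;
}
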
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/signal.h
+@@ -0,0 +1,202 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM signal
++
++#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SIGNAL_H
++
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++#ifndef _TRACE_SIGNAL_DEF
++#define _TRACE_SIGNAL_DEF
++#include <linux/signal.h>
++#include <linux/sched.h>
++#undef TP_STORE_SIGINFO
++#define TP_STORE_SIGINFO(info) \
++ tp_assign(errno, \
++ (info == SEND_SIG_NOINFO || info == SEND_SIG_FORCED || info == SEND_SIG_PRIV) ? \
++ 0 : \
++ info->si_errno) \
++ tp_assign(code, \
++ (info == SEND_SIG_NOINFO || info == SEND_SIG_FORCED) ? \
++ SI_USER : \
++ ((info == SEND_SIG_PRIV) ? SI_KERNEL : info->si_code))
++#endif /* _TRACE_SIGNAL_DEF */
++
++/**
++ * signal_generate - called when a signal is generated
++ * @sig: signal number
++ * @info: pointer to struct siginfo
++ * @task: pointer to struct task_struct
++ *
++ * The current process sends a 'sig' signal to the 'task' process with
++ * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
++ * 'info' is not a pointer and its fields cannot be accessed. Instead,
++ * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV
++ * means that si_code is SI_KERNEL.
++ */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0))
++TRACE_EVENT(signal_generate,
++
++ TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
++
++ TP_ARGS(sig, info, task),
++
++ TP_STRUCT__entry(
++ __field( int, sig )
++ __field( int, errno )
++ __field( int, code )
++ __array_text( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ ),
++
++ TP_fast_assign(
++ tp_assign(sig, sig)
++ TP_STORE_SIGINFO(info)
++ tp_memcpy(comm, task->comm, TASK_COMM_LEN)
++ tp_assign(pid, task->pid)
++ ),
++
++ TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
++ __entry->sig, __entry->errno, __entry->code,
++ __entry->comm, __entry->pid)
++)
++#else
++TRACE_EVENT(signal_generate,
++
++ TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
++ int group, int result),
++
++ TP_ARGS(sig, info, task, group, result),
++
++ TP_STRUCT__entry(
++ __field( int, sig )
++ __field( int, errno )
++ __field( int, code )
++ __array_text( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __field( int, group )
++ __field( int, result )
++ ),
++
++ TP_fast_assign(
++ tp_assign(sig, sig)
++ TP_STORE_SIGINFO(info)
++ tp_memcpy(comm, task->comm, TASK_COMM_LEN)
++ tp_assign(pid, task->pid)
++ tp_assign(group, group)
++ tp_assign(result, result)
++ ),
++
++ TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
++ __entry->sig, __entry->errno, __entry->code,
++ __entry->comm, __entry->pid, __entry->group,
++ __entry->result)
++)
++#endif
++
++/**
++ * signal_deliver - called when a signal is delivered
++ * @sig: signal number
++ * @info: pointer to struct siginfo
++ * @ka: pointer to struct k_sigaction
++ *
++ * A 'sig' signal is delivered to the current process with 'info' siginfo,
++ * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or
++ * SIG_DFL.
++ * Note that some signals reported by the signal_generate tracepoint can be
++ * lost, ignored or modified (by a debugger) before hitting this tracepoint.
++ * This means this tracepoint shows which signals are actually delivered, but
++ * matching generated signals to delivered signals may not be exact.
++ */
++TRACE_EVENT(signal_deliver,
++
++ TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
++
++ TP_ARGS(sig, info, ka),
++
++ TP_STRUCT__entry(
++ __field( int, sig )
++ __field( int, errno )
++ __field( int, code )
++ __field( unsigned long, sa_handler )
++ __field( unsigned long, sa_flags )
++ ),
++
++ TP_fast_assign(
++ tp_assign(sig, sig)
++ TP_STORE_SIGINFO(info)
++ tp_assign(sa_handler, (unsigned long)ka->sa.sa_handler)
++ tp_assign(sa_flags, ka->sa.sa_flags)
++ ),
++
++ TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx",
++ __entry->sig, __entry->errno, __entry->code,
++ __entry->sa_handler, __entry->sa_flags)
++)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0))
++DECLARE_EVENT_CLASS(signal_queue_overflow,
++
++ TP_PROTO(int sig, int group, struct siginfo *info),
++
++ TP_ARGS(sig, group, info),
++
++ TP_STRUCT__entry(
++ __field( int, sig )
++ __field( int, group )
++ __field( int, errno )
++ __field( int, code )
++ ),
++
++ TP_fast_assign(
++ tp_assign(sig, sig)
++ tp_assign(group, group)
++ TP_STORE_SIGINFO(info)
++ ),
++
++ TP_printk("sig=%d group=%d errno=%d code=%d",
++ __entry->sig, __entry->group, __entry->errno, __entry->code)
++)
++
++/**
++ * signal_overflow_fail - called when the signal queue overflows
++ * @sig: signal number
++ * @group: signal to process group or not (bool)
++ * @info: pointer to struct siginfo
++ *
++ * The kernel fails to generate the 'sig' signal with 'info' siginfo because
++ * the siginfo queue has overflowed, and the signal is dropped.
++ * 'group' is not 0 if the signal will be sent to a process group.
++ * 'sig' is always one of the RT signals.
++ */
++DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
++
++ TP_PROTO(int sig, int group, struct siginfo *info),
++
++ TP_ARGS(sig, group, info)
++)
++
++/**
++ * signal_lose_info - called when siginfo is lost
++ * @sig: signal number
++ * @group: signal to process group or not (bool)
++ * @info: pointer to struct siginfo
++ *
++ * The kernel generates the 'sig' signal but loses the 'info' siginfo because
++ * the siginfo queue has overflowed.
++ * 'group' is not 0 if the signal will be sent to a process group.
++ * 'sig' is always one of the non-RT signals.
++ */
++DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
++
++ TP_PROTO(int sig, int group, struct siginfo *info),
++
++ TP_ARGS(sig, group, info)
++)
++#endif
++
++#endif /* _TRACE_SIGNAL_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
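
TP_STORE_SIGINFO above has to treat SEND_SIG_NOINFO, SEND_SIG_FORCED and SEND_SIG_PRIV as sentinels rather than real siginfo pointers. The same decision tree written as a plain helper (the function name is illustrative):

/* Sketch of the TP_STORE_SIGINFO() logic as ordinary C. */
#include <linux/signal.h>
#include <linux/sched.h>

static void siginfo_to_trace_fields(struct siginfo *info, int *si_errno, int *si_code)
{
	if (info == SEND_SIG_NOINFO || info == SEND_SIG_FORCED ||
	    info == SEND_SIG_PRIV) {
		/* sentinel values: not dereferenceable */
		*si_errno = 0;
		*si_code = (info == SEND_SIG_PRIV) ? SI_KERNEL : SI_USER;
	} else {
		*si_errno = info->si_errno;
		*si_code = info->si_code;
	}
}
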
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/skb.h
+@@ -0,0 +1,84 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM skb
++
++#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SKB_H
++
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/tracepoint.h>
++#include <linux/version.h>
++
++/*
++ * Tracepoint for freeing an sk_buff:
++ */
++TRACE_EVENT_MAP(kfree_skb,
++
++ skb_kfree,
++
++ TP_PROTO(struct sk_buff *skb, void *location),
++
++ TP_ARGS(skb, location),
++
++ TP_STRUCT__entry(
++ __field( void *, skbaddr )
++ __field( void *, location )
++ __field( unsigned short, protocol )
++ ),
++
++ TP_fast_assign(
++ tp_assign(skbaddr, skb)
++ tp_assign(location, location)
++ tp_assign(protocol, ntohs(skb->protocol))
++ ),
++
++ TP_printk("skbaddr=%p protocol=%u location=%p",
++ __entry->skbaddr, __entry->protocol, __entry->location)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++TRACE_EVENT_MAP(consume_skb,
++
++ skb_consume,
++
++ TP_PROTO(struct sk_buff *skb),
++
++ TP_ARGS(skb),
++
++ TP_STRUCT__entry(
++ __field( void *, skbaddr )
++ ),
++
++ TP_fast_assign(
++ tp_assign(skbaddr, skb)
++ ),
++
++ TP_printk("skbaddr=%p", __entry->skbaddr)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
++TRACE_EVENT(skb_copy_datagram_iovec,
++
++ TP_PROTO(const struct sk_buff *skb, int len),
++
++ TP_ARGS(skb, len),
++
++ TP_STRUCT__entry(
++ __field( const void *, skbaddr )
++ __field( int, len )
++ ),
++
++ TP_fast_assign(
++ tp_assign(skbaddr, skb)
++ tp_assign(len, len)
++ ),
++
++ TP_printk("skbaddr=%p len=%d", __entry->skbaddr, __entry->len)
++)
++#endif
++
++#endif /* _TRACE_SKB_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/sock.h
+@@ -0,0 +1,68 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM sock
++
++#if !defined(_TRACE_SOCK_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SOCK_H
++
++#include <net/sock.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(sock_rcvqueue_full,
++
++ TP_PROTO(struct sock *sk, struct sk_buff *skb),
++
++ TP_ARGS(sk, skb),
++
++ TP_STRUCT__entry(
++ __field(int, rmem_alloc)
++ __field(unsigned int, truesize)
++ __field(int, sk_rcvbuf)
++ ),
++
++ TP_fast_assign(
++ tp_assign(rmem_alloc, atomic_read(&sk->sk_rmem_alloc))
++ tp_assign(truesize, skb->truesize)
++ tp_assign(sk_rcvbuf, sk->sk_rcvbuf)
++ ),
++
++ TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
++ __entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf)
++)
++
++TRACE_EVENT(sock_exceed_buf_limit,
++
++ TP_PROTO(struct sock *sk, struct proto *prot, long allocated),
++
++ TP_ARGS(sk, prot, allocated),
++
++ TP_STRUCT__entry(
++ __string(name, prot->name)
++ __array(long, sysctl_mem, 3)
++ __field(long, allocated)
++ __field(int, sysctl_rmem)
++ __field(int, rmem_alloc)
++ ),
++
++ TP_fast_assign(
++ tp_strcpy(name, prot->name)
++ tp_memcpy(sysctl_mem, prot->sysctl_mem, 3 * sizeof(long))
++ tp_assign(allocated, allocated)
++ tp_assign(sysctl_rmem, prot->sysctl_rmem[0])
++ tp_assign(rmem_alloc, atomic_read(&sk->sk_rmem_alloc))
++ ),
++
++ TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld "
++ "sysctl_rmem=%d rmem_alloc=%d",
++ __entry->name,
++ __entry->sysctl_mem[0],
++ __entry->sysctl_mem[1],
++ __entry->sysctl_mem[2],
++ __entry->allocated,
++ __entry->sysctl_rmem,
++ __entry->rmem_alloc)
++)
++
++#endif /* _TRACE_SOCK_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/sunrpc.h
+@@ -0,0 +1,177 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM sunrpc
++
++#if !defined(_TRACE_SUNRPC_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SUNRPC_H
++
++#include <linux/sunrpc/sched.h>
++#include <linux/sunrpc/clnt.h>
++#include <linux/tracepoint.h>
++
++DECLARE_EVENT_CLASS(rpc_task_status,
++
++ TP_PROTO(struct rpc_task *task),
++
++ TP_ARGS(task),
++
++ TP_STRUCT__entry(
++ __field(const struct rpc_task *, task)
++ __field(const struct rpc_clnt *, clnt)
++ __field(int, status)
++ ),
++
++ TP_fast_assign(
++ tp_assign(task, task)
++ tp_assign(clnt, task->tk_client)
++ tp_assign(status, task->tk_status)
++ ),
++
++ TP_printk("task:%p@%p, status %d",__entry->task, __entry->clnt, __entry->status)
++)
++
++DEFINE_EVENT(rpc_task_status, rpc_call_status,
++ TP_PROTO(struct rpc_task *task),
++
++ TP_ARGS(task)
++)
++
++DEFINE_EVENT(rpc_task_status, rpc_bind_status,
++ TP_PROTO(struct rpc_task *task),
++
++ TP_ARGS(task)
++)
++
++TRACE_EVENT(rpc_connect_status,
++ TP_PROTO(struct rpc_task *task, int status),
++
++ TP_ARGS(task, status),
++
++ TP_STRUCT__entry(
++ __field(const struct rpc_task *, task)
++ __field(const struct rpc_clnt *, clnt)
++ __field(int, status)
++ ),
++
++ TP_fast_assign(
++ tp_assign(task, task)
++ tp_assign(clnt, task->tk_client)
++ tp_assign(status, status)
++ ),
++
++ TP_printk("task:%p@%p, status %d",__entry->task, __entry->clnt, __entry->status)
++)
++
++DECLARE_EVENT_CLASS(rpc_task_running,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
++
++ TP_ARGS(clnt, task, action),
++
++ TP_STRUCT__entry(
++ __field(const struct rpc_clnt *, clnt)
++ __field(const struct rpc_task *, task)
++ __field(const void *, action)
++ __field(unsigned long, runstate)
++ __field(int, status)
++ __field(unsigned short, flags)
++ ),
++
++ TP_fast_assign(
++ tp_assign(clnt, clnt)
++ tp_assign(task, task)
++ tp_assign(action, action)
++ tp_assign(runstate, task->tk_runstate)
++ tp_assign(status, task->tk_status)
++ tp_assign(flags, task->tk_flags)
++ ),
++
++ TP_printk("task:%p@%p flags=%4.4x state=%4.4lx status=%d action=%pf",
++ __entry->task,
++ __entry->clnt,
++ __entry->flags,
++ __entry->runstate,
++ __entry->status,
++ __entry->action
++ )
++)
++
++DEFINE_EVENT(rpc_task_running, rpc_task_begin,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
++
++ TP_ARGS(clnt, task, action)
++
++)
++
++DEFINE_EVENT(rpc_task_running, rpc_task_run_action,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
++
++ TP_ARGS(clnt, task, action)
++
++)
++
++DEFINE_EVENT(rpc_task_running, rpc_task_complete,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
++
++ TP_ARGS(clnt, task, action)
++
++)
++
++DECLARE_EVENT_CLASS(rpc_task_queued,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
++
++ TP_ARGS(clnt, task, q),
++
++ TP_STRUCT__entry(
++ __field(const struct rpc_clnt *, clnt)
++ __field(const struct rpc_task *, task)
++ __field(unsigned long, timeout)
++ __field(unsigned long, runstate)
++ __field(int, status)
++ __field(unsigned short, flags)
++ __string(q_name, rpc_qname(q))
++ ),
++
++ TP_fast_assign(
++ tp_assign(clnt, clnt)
++ tp_assign(task, task)
++ tp_assign(timeout, task->tk_timeout)
++ tp_assign(runstate, task->tk_runstate)
++ tp_assign(status, task->tk_status)
++ tp_assign(flags, task->tk_flags)
++ tp_strcpy(q_name, rpc_qname(q))
++ ),
++
++ TP_printk("task:%p@%p flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
++ __entry->task,
++ __entry->clnt,
++ __entry->flags,
++ __entry->runstate,
++ __entry->status,
++ __entry->timeout,
++ __get_str(q_name)
++ )
++)
++
++DEFINE_EVENT(rpc_task_queued, rpc_task_sleep,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
++
++ TP_ARGS(clnt, task, q)
++
++)
++
++DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
++
++ TP_ARGS(clnt, task, q)
++
++)
++
++#endif /* _TRACE_SUNRPC_H */
++
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/syscalls.h
+@@ -0,0 +1,76 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM raw_syscalls
++#define TRACE_INCLUDE_FILE syscalls
++
++#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_EVENTS_SYSCALLS_H
++
++#include <linux/tracepoint.h>
++
++#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
++
++#ifndef _TRACE_SYSCALLS_DEF_
++#define _TRACE_SYSCALLS_DEF_
++
++#include <asm/ptrace.h>
++#include <asm/syscall.h>
++
++#endif /* _TRACE_SYSCALLS_DEF_ */
++
++TRACE_EVENT(sys_enter,
++
++ TP_PROTO(struct pt_regs *regs, long id),
++
++ TP_ARGS(regs, id),
++
++ TP_STRUCT__entry(
++ __field( long, id )
++ __array( unsigned long, args, 6 )
++ ),
++
++ TP_fast_assign(
++ tp_assign(id, id)
++ {
++ tp_memcpy(args,
++ ({
++ unsigned long args_copy[6];
++ syscall_get_arguments(current, regs,
++ 0, 6, args_copy);
++ args_copy;
++ }), 6 * sizeof(unsigned long));
++ }
++ ),
++
++ TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
++ __entry->id,
++ __entry->args[0], __entry->args[1], __entry->args[2],
++ __entry->args[3], __entry->args[4], __entry->args[5])
++)
++
++TRACE_EVENT(sys_exit,
++
++ TP_PROTO(struct pt_regs *regs, long ret),
++
++ TP_ARGS(regs, ret),
++
++ TP_STRUCT__entry(
++ __field( long, id )
++ __field( long, ret )
++ ),
++
++ TP_fast_assign(
++ tp_assign(id, syscall_get_nr(current, regs))
++ tp_assign(ret, ret)
++ ),
++
++ TP_printk("NR %ld = %ld",
++ __entry->id, __entry->ret)
++)
++
++#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
++
++#endif /* _TRACE_EVENTS_SYSCALLS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
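
The sys_enter assignment above copies the six syscall arguments through a GCC statement expression so the copy can sit inside tp_memcpy(). The equivalent straight-line form, for readability (the destination name is illustrative; this uses the 3.x five-argument syscall_get_arguments() shown in the source):

/* Sketch: capturing syscall arguments without the statement expression. */
#include <linux/sched.h>
#include <linux/string.h>
#include <asm/syscall.h>

static void capture_syscall_args(struct pt_regs *regs, unsigned long dst[6])
{
	unsigned long args_copy[6];

	syscall_get_arguments(current, regs, 0, 6, args_copy);
	memcpy(dst, args_copy, 6 * sizeof(unsigned long));
}
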
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/timer.h
+@@ -0,0 +1,336 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM timer
++
++#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_TIMER_H
++
++#include <linux/tracepoint.h>
++
++#ifndef _TRACE_TIMER_DEF_
++#define _TRACE_TIMER_DEF_
++#include <linux/hrtimer.h>
++#include <linux/timer.h>
++
++struct timer_list;
++
++#endif /* _TRACE_TIMER_DEF_ */
++
++DECLARE_EVENT_CLASS(timer_class,
++
++ TP_PROTO(struct timer_list *timer),
++
++ TP_ARGS(timer),
++
++ TP_STRUCT__entry(
++ __field( void *, timer )
++ ),
++
++ TP_fast_assign(
++ tp_assign(timer, timer)
++ ),
++
++ TP_printk("timer=%p", __entry->timer)
++)
++
++/**
++ * timer_init - called when the timer is initialized
++ * @timer: pointer to struct timer_list
++ */
++DEFINE_EVENT(timer_class, timer_init,
++
++ TP_PROTO(struct timer_list *timer),
++
++ TP_ARGS(timer)
++)
++
++/**
++ * timer_start - called when the timer is started
++ * @timer: pointer to struct timer_list
++ * @expires: the timer's expiry time
++ */
++TRACE_EVENT(timer_start,
++
++ TP_PROTO(struct timer_list *timer, unsigned long expires),
++
++ TP_ARGS(timer, expires),
++
++ TP_STRUCT__entry(
++ __field( void *, timer )
++ __field( void *, function )
++ __field( unsigned long, expires )
++ __field( unsigned long, now )
++ ),
++
++ TP_fast_assign(
++ tp_assign(timer, timer)
++ tp_assign(function, timer->function)
++ tp_assign(expires, expires)
++ tp_assign(now, jiffies)
++ ),
++
++ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
++ __entry->timer, __entry->function, __entry->expires,
++ (long)__entry->expires - __entry->now)
++)
++
++/**
++ * timer_expire_entry - called immediately before the timer callback
++ * @timer: pointer to struct timer_list
++ *
++ * Allows determining the timer latency.
++ */
++TRACE_EVENT(timer_expire_entry,
++
++ TP_PROTO(struct timer_list *timer),
++
++ TP_ARGS(timer),
++
++ TP_STRUCT__entry(
++ __field( void *, timer )
++ __field( unsigned long, now )
++ __field( void *, function)
++ ),
++
++ TP_fast_assign(
++ tp_assign(timer, timer)
++ tp_assign(now, jiffies)
++ tp_assign(function, timer->function)
++ ),
++
++ TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
++)
++
++/**
++ * timer_expire_exit - called immediately after the timer callback returns
++ * @timer: pointer to struct timer_list
++ *
++ * When used in combination with the timer_expire_entry tracepoint, we can
++ * determine the runtime of the timer callback function.
++ *
++ * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
++ * be invalid. We solely track the pointer.
++ */
++DEFINE_EVENT(timer_class, timer_expire_exit,
++
++ TP_PROTO(struct timer_list *timer),
++
++ TP_ARGS(timer)
++)
++
++/**
++ * timer_cancel - called when the timer is canceled
++ * @timer: pointer to struct timer_list
++ */
++DEFINE_EVENT(timer_class, timer_cancel,
++
++ TP_PROTO(struct timer_list *timer),
++
++ TP_ARGS(timer)
++)
++
++/**
++ * hrtimer_init - called when the hrtimer is initialized
++ * @timer: pointer to struct hrtimer
++ * @clockid: the hrtimer's clock
++ * @mode: the hrtimer's mode
++ */
++TRACE_EVENT(hrtimer_init,
++
++ TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
++ enum hrtimer_mode mode),
++
++ TP_ARGS(hrtimer, clockid, mode),
++
++ TP_STRUCT__entry(
++ __field( void *, hrtimer )
++ __field( clockid_t, clockid )
++ __field( enum hrtimer_mode, mode )
++ ),
++
++ TP_fast_assign(
++ tp_assign(hrtimer, hrtimer)
++ tp_assign(clockid, clockid)
++ tp_assign(mode, mode)
++ ),
++
++ TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
++ __entry->clockid == CLOCK_REALTIME ?
++ "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
++ __entry->mode == HRTIMER_MODE_ABS ?
++ "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
++)
++
++/**
++ * hrtimer_start - called when the hrtimer is started
++ * @timer: pointer to struct hrtimer
++ */
++TRACE_EVENT(hrtimer_start,
++
++ TP_PROTO(struct hrtimer *hrtimer),
++
++ TP_ARGS(hrtimer),
++
++ TP_STRUCT__entry(
++ __field( void *, hrtimer )
++ __field( void *, function )
++ __field( s64, expires )
++ __field( s64, softexpires )
++ ),
++
++ TP_fast_assign(
++ tp_assign(hrtimer, hrtimer)
++ tp_assign(function, hrtimer->function)
++ tp_assign(expires, hrtimer_get_expires(hrtimer).tv64)
++ tp_assign(softexpires, hrtimer_get_softexpires(hrtimer).tv64)
++ ),
++
++ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
++ __entry->hrtimer, __entry->function,
++ (unsigned long long)ktime_to_ns((ktime_t) {
++ .tv64 = __entry->expires }),
++ (unsigned long long)ktime_to_ns((ktime_t) {
++ .tv64 = __entry->softexpires }))
++)
++
++/**
++ * hrtimer_expire_entry - called immediately before the hrtimer callback
++ * @timer: pointer to struct hrtimer
++ * @now: pointer to the variable holding the current time of the timer's base
++ *
++ * Allows determining the timer latency.
++ */
++TRACE_EVENT(hrtimer_expire_entry,
++
++ TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
++
++ TP_ARGS(hrtimer, now),
++
++ TP_STRUCT__entry(
++ __field( void *, hrtimer )
++ __field( s64, now )
++ __field( void *, function)
++ ),
++
++ TP_fast_assign(
++ tp_assign(hrtimer, hrtimer)
++ tp_assign(now, now->tv64)
++ tp_assign(function, hrtimer->function)
++ ),
++
++ TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
++ (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
++)
++
++DECLARE_EVENT_CLASS(hrtimer_class,
++
++ TP_PROTO(struct hrtimer *hrtimer),
++
++ TP_ARGS(hrtimer),
++
++ TP_STRUCT__entry(
++ __field( void *, hrtimer )
++ ),
++
++ TP_fast_assign(
++ tp_assign(hrtimer, hrtimer)
++ ),
++
++ TP_printk("hrtimer=%p", __entry->hrtimer)
++)
++
++/**
++ * hrtimer_expire_exit - called immediately after the hrtimer callback returns
++ * @timer: pointer to struct hrtimer
++ *
++ * When used in combination with the hrtimer_expire_entry tracepoint, we can
++ * determine the runtime of the callback function.
++ */
++DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
++
++ TP_PROTO(struct hrtimer *hrtimer),
++
++ TP_ARGS(hrtimer)
++)
++
++/**
++ * hrtimer_cancel - called when the hrtimer is canceled
++ * @hrtimer: pointer to struct hrtimer
++ */
++DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
++
++ TP_PROTO(struct hrtimer *hrtimer),
++
++ TP_ARGS(hrtimer)
++)
++
++/**
++ * itimer_state - called when itimer is started or canceled
++ * @which: name of the interval timer
++ * @value: the itimer's value; the itimer is canceled if value->it_value is
++ * zero, otherwise it is started
++ * @expires: the itimer's expiry time
++ */
++TRACE_EVENT(itimer_state,
++
++ TP_PROTO(int which, const struct itimerval *const value,
++ cputime_t expires),
++
++ TP_ARGS(which, value, expires),
++
++ TP_STRUCT__entry(
++ __field( int, which )
++ __field( cputime_t, expires )
++ __field( long, value_sec )
++ __field( long, value_usec )
++ __field( long, interval_sec )
++ __field( long, interval_usec )
++ ),
++
++ TP_fast_assign(
++ tp_assign(which, which)
++ tp_assign(expires, expires)
++ tp_assign(value_sec, value->it_value.tv_sec)
++ tp_assign(value_usec, value->it_value.tv_usec)
++ tp_assign(interval_sec, value->it_interval.tv_sec)
++ tp_assign(interval_usec, value->it_interval.tv_usec)
++ ),
++
++ TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
++ __entry->which, (unsigned long long)__entry->expires,
++ __entry->value_sec, __entry->value_usec,
++ __entry->interval_sec, __entry->interval_usec)
++)
++
++/**
++ * itimer_expire - called when itimer expires
++ * @which: type of the interval timer
++ * @pid: pid of the process which owns the timer
++ * @now: current time, used to calculate the latency of the itimer
++ */
++TRACE_EVENT(itimer_expire,
++
++ TP_PROTO(int which, struct pid *pid, cputime_t now),
++
++ TP_ARGS(which, pid, now),
++
++ TP_STRUCT__entry(
++ __field( int , which )
++ __field( pid_t, pid )
++ __field( cputime_t, now )
++ ),
++
++ TP_fast_assign(
++ tp_assign(which, which)
++ tp_assign(now, now)
++ tp_assign(pid, pid_nr(pid))
++ ),
++
++ TP_printk("which=%d pid=%d now=%llu", __entry->which,
++ (int) __entry->pid, (unsigned long long)__entry->now)
++)
++
++#endif /* _TRACE_TIMER_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
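
The hrtimer events above store expiry times as raw s64 values and convert them back for printing by building a ktime_t on the fly, as in the TP_printk strings. A minimal sketch of that round trip (assumes the 3.x-era ktime_t union with a .tv64 member, as used in the source):

/* Sketch: s64 <-> ktime_t round trip used by the hrtimer events above. */
#include <linux/ktime.h>

static inline unsigned long long stored_expires_to_ns(s64 stored)
{
	ktime_t kt = { .tv64 = stored };	/* union layout on 3.x kernels */

	return (unsigned long long)ktime_to_ns(kt);
}
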
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/udp.h
+@@ -0,0 +1,32 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM udp
++
++#if !defined(_TRACE_UDP_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_UDP_H
++
++#include <linux/udp.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(udp_fail_queue_rcv_skb,
++
++ TP_PROTO(int rc, struct sock *sk),
++
++ TP_ARGS(rc, sk),
++
++ TP_STRUCT__entry(
++ __field(int, rc)
++ __field(__u16, lport)
++ ),
++
++ TP_fast_assign(
++ tp_assign(rc, rc)
++ tp_assign(lport, inet_sk(sk)->inet_num)
++ ),
++
++ TP_printk("rc=%d port=%hu", __entry->rc, __entry->lport)
++)
++
++#endif /* _TRACE_UDP_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/vmscan.h
+@@ -0,0 +1,594 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM vmscan
++
++#if !defined(_TRACE_VMSCAN_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_VMSCAN_H
++
++#include <linux/types.h>
++#include <linux/tracepoint.h>
++#include <linux/mm.h>
++#include <linux/memcontrol.h>
++#include <trace/events/gfpflags.h>
++#include <linux/version.h>
++
++#ifndef _TRACE_VMSCAN_DEF
++#define _TRACE_VMSCAN_DEF
++#define RECLAIM_WB_ANON 0x0001u
++#define RECLAIM_WB_FILE 0x0002u
++#define RECLAIM_WB_MIXED 0x0010u
++#define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */
++#define RECLAIM_WB_ASYNC 0x0008u
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++#define show_reclaim_flags(flags) \
++ (flags) ? __print_flags(flags, "|", \
++ {RECLAIM_WB_ANON, "RECLAIM_WB_ANON"}, \
++ {RECLAIM_WB_FILE, "RECLAIM_WB_FILE"}, \
++ {RECLAIM_WB_MIXED, "RECLAIM_WB_MIXED"}, \
++ {RECLAIM_WB_SYNC, "RECLAIM_WB_SYNC"}, \
++ {RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
++ ) : "RECLAIM_WB_NONE"
++#else
++#define show_reclaim_flags(flags) \
++ (flags) ? __print_flags(flags, "|", \
++ {RECLAIM_WB_ANON, "RECLAIM_WB_ANON"}, \
++ {RECLAIM_WB_FILE, "RECLAIM_WB_FILE"}, \
++ {RECLAIM_WB_SYNC, "RECLAIM_WB_SYNC"}, \
++ {RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
++ ) : "RECLAIM_WB_NONE"
++#endif
++
++#if ((LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,38)) || \
++ LTTNG_KERNEL_RANGE(3,1,0, 3,2,0))
++typedef int isolate_mode_t;
++#endif
++
++#endif
++
++TRACE_EVENT(mm_vmscan_kswapd_sleep,
++
++ TP_PROTO(int nid),
++
++ TP_ARGS(nid),
++
++ TP_STRUCT__entry(
++ __field( int, nid )
++ ),
++
++ TP_fast_assign(
++ tp_assign(nid, nid)
++ ),
++
++ TP_printk("nid=%d", __entry->nid)
++)
++
++TRACE_EVENT(mm_vmscan_kswapd_wake,
++
++ TP_PROTO(int nid, int order),
++
++ TP_ARGS(nid, order),
++
++ TP_STRUCT__entry(
++ __field( int, nid )
++ __field( int, order )
++ ),
++
++ TP_fast_assign(
++ tp_assign(nid, nid)
++ tp_assign(order, order)
++ ),
++
++ TP_printk("nid=%d order=%d", __entry->nid, __entry->order)
++)
++
++TRACE_EVENT(mm_vmscan_wakeup_kswapd,
++
++ TP_PROTO(int nid, int zid, int order),
++
++ TP_ARGS(nid, zid, order),
++
++ TP_STRUCT__entry(
++ __field( int, nid )
++ __field( int, zid )
++ __field( int, order )
++ ),
++
++ TP_fast_assign(
++ tp_assign(nid, nid)
++ tp_assign(zid, zid)
++ tp_assign(order, order)
++ ),
++
++ TP_printk("nid=%d zid=%d order=%d",
++ __entry->nid,
++ __entry->zid,
++ __entry->order)
++)
++
++DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
++
++ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
++
++ TP_ARGS(order, may_writepage, gfp_flags),
++
++ TP_STRUCT__entry(
++ __field( int, order )
++ __field( int, may_writepage )
++ __field( gfp_t, gfp_flags )
++ ),
++
++ TP_fast_assign(
++ tp_assign(order, order)
++ tp_assign(may_writepage, may_writepage)
++ tp_assign(gfp_flags, gfp_flags)
++ ),
++
++ TP_printk("order=%d may_writepage=%d gfp_flags=%s",
++ __entry->order,
++ __entry->may_writepage,
++ show_gfp_flags(__entry->gfp_flags))
++)
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin,
++
++ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
++
++ TP_ARGS(order, may_writepage, gfp_flags)
++)
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
++
++ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
++
++ TP_ARGS(order, may_writepage, gfp_flags)
++)
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
++
++ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
++
++ TP_ARGS(order, may_writepage, gfp_flags)
++)
++
++DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
++
++ TP_PROTO(unsigned long nr_reclaimed),
++
++ TP_ARGS(nr_reclaimed),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, nr_reclaimed )
++ ),
++
++ TP_fast_assign(
++ tp_assign(nr_reclaimed, nr_reclaimed)
++ ),
++
++ TP_printk("nr_reclaimed=%lu", __entry->nr_reclaimed)
++)
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end,
++
++ TP_PROTO(unsigned long nr_reclaimed),
++
++ TP_ARGS(nr_reclaimed)
++)
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
++
++ TP_PROTO(unsigned long nr_reclaimed),
++
++ TP_ARGS(nr_reclaimed)
++)
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_reclaim_end,
++
++ TP_PROTO(unsigned long nr_reclaimed),
++
++ TP_ARGS(nr_reclaimed)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
++TRACE_EVENT(mm_shrink_slab_start,
++ TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
++ long nr_objects_to_shrink, unsigned long pgs_scanned,
++ unsigned long lru_pgs, unsigned long cache_items,
++ unsigned long long delta, unsigned long total_scan),
++
++ TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs,
++ cache_items, delta, total_scan),
++
++ TP_STRUCT__entry(
++ __field(struct shrinker *, shr)
++ __field(void *, shrink)
++ __field(long, nr_objects_to_shrink)
++ __field(gfp_t, gfp_flags)
++ __field(unsigned long, pgs_scanned)
++ __field(unsigned long, lru_pgs)
++ __field(unsigned long, cache_items)
++ __field(unsigned long long, delta)
++ __field(unsigned long, total_scan)
++ ),
++
++ TP_fast_assign(
++ tp_assign(shr,shr)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
++ tp_assign(shrink, shr->scan_objects)
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
++ tp_assign(shrink, shr->shrink)
++#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
++ tp_assign(nr_objects_to_shrink, nr_objects_to_shrink)
++ tp_assign(gfp_flags, sc->gfp_mask)
++ tp_assign(pgs_scanned, pgs_scanned)
++ tp_assign(lru_pgs, lru_pgs)
++ tp_assign(cache_items, cache_items)
++ tp_assign(delta, delta)
++ tp_assign(total_scan, total_scan)
++ ),
++
++ TP_printk("%pF %p: objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld",
++ __entry->shrink,
++ __entry->shr,
++ __entry->nr_objects_to_shrink,
++ show_gfp_flags(__entry->gfp_flags),
++ __entry->pgs_scanned,
++ __entry->lru_pgs,
++ __entry->cache_items,
++ __entry->delta,
++ __entry->total_scan)
++)
++
++TRACE_EVENT(mm_shrink_slab_end,
++ TP_PROTO(struct shrinker *shr, int shrinker_retval,
++ long unused_scan_cnt, long new_scan_cnt),
++
++ TP_ARGS(shr, shrinker_retval, unused_scan_cnt, new_scan_cnt),
++
++ TP_STRUCT__entry(
++ __field(struct shrinker *, shr)
++ __field(void *, shrink)
++ __field(long, unused_scan)
++ __field(long, new_scan)
++ __field(int, retval)
++ __field(long, total_scan)
++ ),
++
++ TP_fast_assign(
++ tp_assign(shr, shr)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
++ tp_assign(shrink, shr->scan_objects)
++#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
++ tp_assign(shrink, shr->shrink)
++#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
++ tp_assign(unused_scan, unused_scan_cnt)
++ tp_assign(new_scan, new_scan_cnt)
++ tp_assign(retval, shrinker_retval)
++ tp_assign(total_scan, new_scan_cnt - unused_scan_cnt)
++ ),
++
++ TP_printk("%pF %p: unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d",
++ __entry->shrink,
++ __entry->shr,
++ __entry->unused_scan,
++ __entry->new_scan,
++ __entry->total_scan,
++ __entry->retval)
++)
++#endif
++
++DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
++
++ TP_PROTO(int order,
++ unsigned long nr_requested,
++ unsigned long nr_scanned,
++ unsigned long nr_taken,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ unsigned long nr_lumpy_taken,
++ unsigned long nr_lumpy_dirty,
++ unsigned long nr_lumpy_failed,
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
++ isolate_mode_t isolate_mode
++#else
++ isolate_mode_t isolate_mode,
++ int file
++#endif
++ ),
++
++ TP_ARGS(order, nr_requested, nr_scanned, nr_taken,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
++ isolate_mode
++#else
++ isolate_mode, file
++#endif
++ ),
++
++
++ TP_STRUCT__entry(
++ __field(int, order)
++ __field(unsigned long, nr_requested)
++ __field(unsigned long, nr_scanned)
++ __field(unsigned long, nr_taken)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ __field(unsigned long, nr_lumpy_taken)
++ __field(unsigned long, nr_lumpy_dirty)
++ __field(unsigned long, nr_lumpy_failed)
++#endif
++ __field(isolate_mode_t, isolate_mode)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ __field(int, file)
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_assign(order, order)
++ tp_assign(nr_requested, nr_requested)
++ tp_assign(nr_scanned, nr_scanned)
++ tp_assign(nr_taken, nr_taken)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ tp_assign(nr_lumpy_taken, nr_lumpy_taken)
++ tp_assign(nr_lumpy_dirty, nr_lumpy_dirty)
++ tp_assign(nr_lumpy_failed, nr_lumpy_failed)
++#endif
++ tp_assign(isolate_mode, isolate_mode)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ tp_assign(file, file)
++#endif
++ ),
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
++ TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu",
++ __entry->isolate_mode,
++ __entry->order,
++ __entry->nr_requested,
++ __entry->nr_scanned,
++ __entry->nr_taken,
++ __entry->nr_lumpy_taken,
++ __entry->nr_lumpy_dirty,
++ __entry->nr_lumpy_failed)
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu file=%d",
++ __entry->isolate_mode,
++ __entry->order,
++ __entry->nr_requested,
++ __entry->nr_scanned,
++ __entry->nr_taken,
++ __entry->nr_lumpy_taken,
++ __entry->nr_lumpy_dirty,
++ __entry->nr_lumpy_failed,
++ __entry->file)
++#else
++ TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
++ __entry->isolate_mode,
++ __entry->order,
++ __entry->nr_requested,
++ __entry->nr_scanned,
++ __entry->nr_taken,
++ __entry->file)
++#endif
++)
++
++DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
++
++ TP_PROTO(int order,
++ unsigned long nr_requested,
++ unsigned long nr_scanned,
++ unsigned long nr_taken,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ unsigned long nr_lumpy_taken,
++ unsigned long nr_lumpy_dirty,
++ unsigned long nr_lumpy_failed,
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
++ isolate_mode_t isolate_mode
++#else
++ isolate_mode_t isolate_mode,
++ int file
++#endif
++ ),
++
++ TP_ARGS(order, nr_requested, nr_scanned, nr_taken,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
++ isolate_mode
++#else
++ isolate_mode, file
++#endif
++ )
++
++)
++
++DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
++
++ TP_PROTO(int order,
++ unsigned long nr_requested,
++ unsigned long nr_scanned,
++ unsigned long nr_taken,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ unsigned long nr_lumpy_taken,
++ unsigned long nr_lumpy_dirty,
++ unsigned long nr_lumpy_failed,
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
++ isolate_mode_t isolate_mode
++#else
++ isolate_mode_t isolate_mode,
++ int file
++#endif
++ ),
++
++ TP_ARGS(order, nr_requested, nr_scanned, nr_taken,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++ nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
++ isolate_mode
++#else
++ isolate_mode, file
++#endif
++ )
++)
++
++TRACE_EVENT(mm_vmscan_writepage,
++
++ TP_PROTO(struct page *page,
++ int reclaim_flags),
++
++ TP_ARGS(page, reclaim_flags),
++
++ TP_STRUCT__entry(
++ __field(struct page *, page)
++ __field(int, reclaim_flags)
++ ),
++
++ TP_fast_assign(
++ tp_assign(page, page)
++ tp_assign(reclaim_flags, reclaim_flags)
++ ),
++
++ TP_printk("page=%p pfn=%lu flags=%s",
++ __entry->page,
++ page_to_pfn(__entry->page),
++ show_reclaim_flags(__entry->reclaim_flags))
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
++
++ TP_PROTO(int nid, int zid,
++ unsigned long nr_scanned, unsigned long nr_reclaimed,
++ int priority, int reclaim_flags),
++
++ TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags),
++
++ TP_STRUCT__entry(
++ __field(int, nid)
++ __field(int, zid)
++ __field(unsigned long, nr_scanned)
++ __field(unsigned long, nr_reclaimed)
++ __field(int, priority)
++ __field(int, reclaim_flags)
++ ),
++
++ TP_fast_assign(
++ tp_assign(nid, nid)
++ tp_assign(zid, zid)
++ tp_assign(nr_scanned, nr_scanned)
++ tp_assign(nr_reclaimed, nr_reclaimed)
++ tp_assign(priority, priority)
++ tp_assign(reclaim_flags, reclaim_flags)
++ ),
++
++ TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s",
++ __entry->nid, __entry->zid,
++ __entry->nr_scanned, __entry->nr_reclaimed,
++ __entry->priority,
++ show_reclaim_flags(__entry->reclaim_flags))
++)
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++TRACE_EVENT_MAP(replace_swap_token,
++
++ mm_vmscan_replace_swap_token,
++
++ TP_PROTO(struct mm_struct *old_mm,
++ struct mm_struct *new_mm),
++
++ TP_ARGS(old_mm, new_mm),
++
++ TP_STRUCT__entry(
++ __field(struct mm_struct*, old_mm)
++ __field(unsigned int, old_prio)
++ __field(struct mm_struct*, new_mm)
++ __field(unsigned int, new_prio)
++ ),
++
++ TP_fast_assign(
++ tp_assign(old_mm, old_mm)
++ tp_assign(old_prio, old_mm ? old_mm->token_priority : 0)
++ tp_assign(new_mm, new_mm)
++ tp_assign(new_prio, new_mm->token_priority)
++ ),
++
++ TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
++ __entry->old_mm, __entry->old_prio,
++ __entry->new_mm, __entry->new_prio)
++)
++
++DECLARE_EVENT_CLASS(mm_vmscan_put_swap_token_template,
++ TP_PROTO(struct mm_struct *swap_token_mm),
++
++ TP_ARGS(swap_token_mm),
++
++ TP_STRUCT__entry(
++ __field(struct mm_struct*, swap_token_mm)
++ ),
++
++ TP_fast_assign(
++ tp_assign(swap_token_mm, swap_token_mm)
++ ),
++
++ TP_printk("token_mm=%p", __entry->swap_token_mm)
++)
++
++DEFINE_EVENT_MAP(mm_vmscan_put_swap_token_template, put_swap_token,
++
++ mm_vmscan_put_swap_token,
++
++ TP_PROTO(struct mm_struct *swap_token_mm),
++ TP_ARGS(swap_token_mm)
++)
++
++DEFINE_EVENT_CONDITION_MAP(mm_vmscan_put_swap_token_template, disable_swap_token,
++
++ mm_vmscan_disable_swap_token,
++
++ TP_PROTO(struct mm_struct *swap_token_mm),
++ TP_ARGS(swap_token_mm),
++ TP_CONDITION(swap_token_mm != NULL)
++)
++
++TRACE_EVENT_CONDITION_MAP(update_swap_token_priority,
++
++ mm_vmscan_update_swap_token_priority,
++
++ TP_PROTO(struct mm_struct *mm,
++ unsigned int old_prio,
++ struct mm_struct *swap_token_mm),
++
++ TP_ARGS(mm, old_prio, swap_token_mm),
++
++ TP_CONDITION(mm->token_priority != old_prio),
++
++ TP_STRUCT__entry(
++ __field(struct mm_struct*, mm)
++ __field(unsigned int, old_prio)
++ __field(unsigned int, new_prio)
++ __field(struct mm_struct*, swap_token_mm)
++ __field(unsigned int, swap_token_prio)
++ ),
++
++ TP_fast_assign(
++ tp_assign(mm, mm)
++ tp_assign(old_prio, old_prio)
++ tp_assign(new_prio, mm->token_priority)
++ tp_assign(swap_token_mm, swap_token_mm)
++ tp_assign(swap_token_prio, swap_token_mm ? swap_token_mm->token_priority : 0)
++ ),
++
++ TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
++ __entry->mm, __entry->old_prio, __entry->new_prio,
++ __entry->swap_token_mm, __entry->swap_token_prio)
++)
++#endif
++
++#endif /* _TRACE_VMSCAN_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/workqueue.h
+@@ -0,0 +1,219 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM workqueue
++
++#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_WORKQUEUE_H
++
++#include <linux/tracepoint.h>
++#include <linux/workqueue.h>
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
++
++#ifndef _TRACE_WORKQUEUE_DEF_
++#define _TRACE_WORKQUEUE_DEF_
++
++struct worker;
++struct global_cwq;
++
++#endif
++
++DECLARE_EVENT_CLASS(workqueue_work,
++
++ TP_PROTO(struct work_struct *work),
++
++ TP_ARGS(work),
++
++ TP_STRUCT__entry(
++ __field( void *, work )
++ ),
++
++ TP_fast_assign(
++ tp_assign(work, work)
++ ),
++
++ TP_printk("work struct %p", __entry->work)
++)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++/**
++ * workqueue_queue_work - called when a work gets queued
++ * @req_cpu: the requested cpu
++ * @cwq: pointer to struct cpu_workqueue_struct
++ * @work: pointer to struct work_struct
++ *
++ * This event occurs when a work is queued immediately or once a
++ * delayed work is actually queued on a workqueue (i.e. once the delay
++ * has been reached).
++ */
++TRACE_EVENT(workqueue_queue_work,
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
++ TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
++ struct work_struct *work),
++
++ TP_ARGS(req_cpu, pwq, work),
++#else
++ TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
++ struct work_struct *work),
++
++ TP_ARGS(req_cpu, cwq, work),
++#endif
++
++ TP_STRUCT__entry(
++ __field( void *, work )
++ __field( void *, function)
++ __field( unsigned int, req_cpu )
++ ),
++
++ TP_fast_assign(
++ tp_assign(work, work)
++ tp_assign(function, work->func)
++ tp_assign(req_cpu, req_cpu)
++ ),
++
++ TP_printk("work struct=%p function=%pf req_cpu=%u",
++ __entry->work, __entry->function,
++ __entry->req_cpu)
++)
++
++/**
++ * workqueue_activate_work - called when a work gets activated
++ * @work: pointer to struct work_struct
++ *
++ * This event occurs when a queued work is put on the active queue,
++ * which happens immediately after queueing unless the @max_active limit
++ * is reached.
++ */
++DEFINE_EVENT(workqueue_work, workqueue_activate_work,
++
++ TP_PROTO(struct work_struct *work),
++
++ TP_ARGS(work)
++)
++#endif
++
++/**
++ * workqueue_execute_start - called immediately before the workqueue callback
++ * @work: pointer to struct work_struct
++ *
++ * Allows tracking of workqueue execution.
++ */
++TRACE_EVENT(workqueue_execute_start,
++
++ TP_PROTO(struct work_struct *work),
++
++ TP_ARGS(work),
++
++ TP_STRUCT__entry(
++ __field( void *, work )
++ __field( void *, function)
++ ),
++
++ TP_fast_assign(
++ tp_assign(work, work)
++ tp_assign(function, work->func)
++ ),
++
++ TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
++)
++
++/**
++ * workqueue_execute_end - called immediately after the workqueue callback
++ * @work: pointer to struct work_struct
++ *
++ * Allows tracking of workqueue execution.
++ */
++DEFINE_EVENT(workqueue_work, workqueue_execute_end,
++
++ TP_PROTO(struct work_struct *work),
++
++ TP_ARGS(work)
++)
++
++#else
++
++DECLARE_EVENT_CLASS(workqueue,
++
++ TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
++
++ TP_ARGS(wq_thread, work),
++
++ TP_STRUCT__entry(
++ __array(char, thread_comm, TASK_COMM_LEN)
++ __field(pid_t, thread_pid)
++ __field(work_func_t, func)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
++ tp_assign(thread_pid, wq_thread->pid)
++ tp_assign(func, work->func)
++ ),
++
++ TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
++ __entry->thread_pid, __entry->func)
++)
++
++DEFINE_EVENT(workqueue, workqueue_insertion,
++
++ TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
++
++ TP_ARGS(wq_thread, work)
++)
++
++DEFINE_EVENT(workqueue, workqueue_execution,
++
++ TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
++
++ TP_ARGS(wq_thread, work)
++)
++
++/* Trace the creation of one workqueue thread on a cpu */
++TRACE_EVENT(workqueue_creation,
++
++ TP_PROTO(struct task_struct *wq_thread, int cpu),
++
++ TP_ARGS(wq_thread, cpu),
++
++ TP_STRUCT__entry(
++ __array(char, thread_comm, TASK_COMM_LEN)
++ __field(pid_t, thread_pid)
++ __field(int, cpu)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
++ tp_assign(thread_pid, wq_thread->pid)
++ tp_assign(cpu, cpu)
++ ),
++
++ TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
++ __entry->thread_pid, __entry->cpu)
++)
++
++TRACE_EVENT(workqueue_destruction,
++
++ TP_PROTO(struct task_struct *wq_thread),
++
++ TP_ARGS(wq_thread),
++
++ TP_STRUCT__entry(
++ __array(char, thread_comm, TASK_COMM_LEN)
++ __field(pid_t, thread_pid)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
++ tp_assign(thread_pid, wq_thread->pid)
++ ),
++
++ TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
++)
++
++#endif
++
++#endif /* _TRACE_WORKQUEUE_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/lttng-module/writeback.h
+@@ -0,0 +1,617 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM writeback
++
++#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_WRITEBACK_H
++
++#include <linux/backing-dev.h>
++#include <linux/writeback.h>
++#include <linux/version.h>
++
++#ifndef _TRACE_WRITEBACK_DEF_
++#define _TRACE_WRITEBACK_DEF_
++static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
++{
++ struct super_block *sb = inode->i_sb;
++
++ if (strcmp(sb->s_type->name, "bdev") == 0)
++ return inode->i_mapping->backing_dev_info;
++
++ return sb->s_bdi;
++}
++#endif
++
++#define show_inode_state(state) \
++ __print_flags(state, "|", \
++ {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \
++ {I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \
++ {I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \
++ {I_NEW, "I_NEW"}, \
++ {I_WILL_FREE, "I_WILL_FREE"}, \
++ {I_FREEING, "I_FREEING"}, \
++ {I_CLEAR, "I_CLEAR"}, \
++ {I_SYNC, "I_SYNC"}, \
++ {I_REFERENCED, "I_REFERENCED"} \
++ )
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++#define WB_WORK_REASON \
++ {WB_REASON_BACKGROUND, "background"}, \
++ {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
++ {WB_REASON_SYNC, "sync"}, \
++ {WB_REASON_PERIODIC, "periodic"}, \
++ {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
++ {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
++ {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
++ {WB_REASON_FORKER_THREAD, "forker_thread"}
++#endif
++
++DECLARE_EVENT_CLASS(writeback_work_class,
++ TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
++ TP_ARGS(bdi, work),
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ ),
++ TP_fast_assign(
++ tp_memcpy(name, dev_name(bdi->dev ? bdi->dev :
++ default_backing_dev_info.dev), 32)
++ ),
++ TP_printk("bdi %s",
++ __entry->name
++ )
++)
++#define DEFINE_WRITEBACK_WORK_EVENT(name) \
++DEFINE_EVENT(writeback_work_class, name, \
++ TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
++ TP_ARGS(bdi, work))
++DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread)
++DEFINE_WRITEBACK_WORK_EVENT(writeback_queue)
++DEFINE_WRITEBACK_WORK_EVENT(writeback_exec)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
++DEFINE_WRITEBACK_WORK_EVENT(writeback_start)
++DEFINE_WRITEBACK_WORK_EVENT(writeback_written)
++DEFINE_WRITEBACK_WORK_EVENT(writeback_wait)
++#endif
++
++TRACE_EVENT(writeback_pages_written,
++ TP_PROTO(long pages_written),
++ TP_ARGS(pages_written),
++ TP_STRUCT__entry(
++ __field(long, pages)
++ ),
++ TP_fast_assign(
++ tp_assign(pages, pages_written)
++ ),
++ TP_printk("%ld", __entry->pages)
++)
++
++DECLARE_EVENT_CLASS(writeback_class,
++ TP_PROTO(struct backing_dev_info *bdi),
++ TP_ARGS(bdi),
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ ),
++ TP_fast_assign(
++ tp_memcpy(name, dev_name(bdi->dev), 32)
++ ),
++ TP_printk("bdi %s",
++ __entry->name
++ )
++)
++#define DEFINE_WRITEBACK_EVENT(name) \
++DEFINE_EVENT(writeback_class, name, \
++ TP_PROTO(struct backing_dev_info *bdi), \
++ TP_ARGS(bdi))
++
++#define DEFINE_WRITEBACK_EVENT_MAP(name, map) \
++DEFINE_EVENT_MAP(writeback_class, name, map, \
++ TP_PROTO(struct backing_dev_info *bdi), \
++ TP_ARGS(bdi))
++
++DEFINE_WRITEBACK_EVENT(writeback_nowork)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
++DEFINE_WRITEBACK_EVENT(writeback_wake_background)
++#endif
++DEFINE_WRITEBACK_EVENT(writeback_wake_thread)
++DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread)
++DEFINE_WRITEBACK_EVENT(writeback_bdi_register)
++DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister)
++DEFINE_WRITEBACK_EVENT(writeback_thread_start)
++DEFINE_WRITEBACK_EVENT(writeback_thread_stop)
++#if (LTTNG_KERNEL_RANGE(3,1,0, 3,2,0))
++DEFINE_WRITEBACK_EVENT_MAP(balance_dirty_start, writeback_balance_dirty_start)
++DEFINE_WRITEBACK_EVENT_MAP(balance_dirty_wait, writeback_balance_dirty_wait)
++
++TRACE_EVENT_MAP(balance_dirty_written,
++
++ writeback_balance_dirty_written,
++
++ TP_PROTO(struct backing_dev_info *bdi, int written),
++
++ TP_ARGS(bdi, written),
++
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ __field(int, written)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(name, dev_name(bdi->dev), 32)
++ tp_assign(written, written)
++ ),
++
++ TP_printk("bdi %s written %d",
++ __entry->name,
++ __entry->written
++ )
++)
++#endif
++
++DECLARE_EVENT_CLASS(writeback_wbc_class,
++ TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
++ TP_ARGS(wbc, bdi),
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ __field(long, nr_to_write)
++ __field(long, pages_skipped)
++ __field(int, sync_mode)
++ __field(int, for_kupdate)
++ __field(int, for_background)
++ __field(int, for_reclaim)
++ __field(int, range_cyclic)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
++ __field(int, more_io)
++ __field(unsigned long, older_than_this)
++#endif
++ __field(long, range_start)
++ __field(long, range_end)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(name, dev_name(bdi->dev), 32)
++ tp_assign(nr_to_write, wbc->nr_to_write)
++ tp_assign(pages_skipped, wbc->pages_skipped)
++ tp_assign(sync_mode, wbc->sync_mode)
++ tp_assign(for_kupdate, wbc->for_kupdate)
++ tp_assign(for_background, wbc->for_background)
++ tp_assign(for_reclaim, wbc->for_reclaim)
++ tp_assign(range_cyclic, wbc->range_cyclic)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
++ tp_assign(more_io, wbc->more_io)
++ tp_assign(older_than_this, wbc->older_than_this ?
++ *wbc->older_than_this : 0)
++#endif
++ tp_assign(range_start, (long)wbc->range_start)
++ tp_assign(range_end, (long)wbc->range_end)
++ ),
++
++ TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
++ "bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx "
++#else
++ "bgrd=%d reclm=%d cyclic=%d "
++#endif
++ "start=0x%lx end=0x%lx",
++ __entry->name,
++ __entry->nr_to_write,
++ __entry->pages_skipped,
++ __entry->sync_mode,
++ __entry->for_kupdate,
++ __entry->for_background,
++ __entry->for_reclaim,
++ __entry->range_cyclic,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
++ __entry->more_io,
++ __entry->older_than_this,
++#endif
++ __entry->range_start,
++ __entry->range_end)
++)
++
++#undef DEFINE_WBC_EVENT
++#define DEFINE_WBC_EVENT(name, map) \
++DEFINE_EVENT_MAP(writeback_wbc_class, name, map, \
++ TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
++ TP_ARGS(wbc, bdi))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))
++DEFINE_WBC_EVENT(wbc_writeback_start, writeback_wbc_writeback_start)
++DEFINE_WBC_EVENT(wbc_writeback_written, writeback_wbc_writeback_written)
++DEFINE_WBC_EVENT(wbc_writeback_wait, writeback_wbc_writeback_wait)
++DEFINE_WBC_EVENT(wbc_balance_dirty_start, writeback_wbc_balance_dirty_start)
++DEFINE_WBC_EVENT(wbc_balance_dirty_written, writeback_wbc_balance_dirty_written)
++DEFINE_WBC_EVENT(wbc_balance_dirty_wait, writeback_wbc_balance_dirty_wait)
++#endif
++DEFINE_WBC_EVENT(wbc_writepage, writeback_wbc_writepage)
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
++TRACE_EVENT(writeback_queue_io,
++ TP_PROTO(struct bdi_writeback *wb,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++ struct wb_writeback_work *work,
++#else
++ unsigned long *older_than_this,
++#endif
++ int moved),
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++ TP_ARGS(wb, work, moved),
++#else
++ TP_ARGS(wb, older_than_this, moved),
++#endif
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++#else
++ __field(unsigned long, older)
++ __field(long, age)
++#endif
++ __field(int, moved)
++ ),
++ TP_fast_assign(
++ tp_memcpy(name, dev_name(wb->bdi->dev), 32)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++#else
++ tp_assign(older, older_than_this ? *older_than_this : 0)
++ tp_assign(age, older_than_this ?
++ (jiffies - *older_than_this) * 1000 / HZ : -1)
++#endif
++ tp_assign(moved, moved)
++ ),
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++ TP_printk("bdi %s: enqueue=%d",
++ __entry->name,
++ __entry->moved
++ )
++#else
++ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
++ __entry->name,
++ __entry->older, /* older_than_this in jiffies */
++ __entry->age, /* older_than_this in relative milliseconds */
++ __entry->moved
++ )
++#endif
++)
++
++TRACE_EVENT_MAP(global_dirty_state,
++
++ writeback_global_dirty_state,
++
++ TP_PROTO(unsigned long background_thresh,
++ unsigned long dirty_thresh
++ ),
++
++ TP_ARGS(background_thresh,
++ dirty_thresh
++ ),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, nr_dirty)
++ __field(unsigned long, nr_writeback)
++ __field(unsigned long, nr_unstable)
++ __field(unsigned long, background_thresh)
++ __field(unsigned long, dirty_thresh)
++ __field(unsigned long, dirty_limit)
++ __field(unsigned long, nr_dirtied)
++ __field(unsigned long, nr_written)
++ ),
++
++ TP_fast_assign(
++ tp_assign(nr_dirty, global_page_state(NR_FILE_DIRTY))
++ tp_assign(nr_writeback, global_page_state(NR_WRITEBACK))
++ tp_assign(nr_unstable, global_page_state(NR_UNSTABLE_NFS))
++ tp_assign(nr_dirtied, global_page_state(NR_DIRTIED))
++ tp_assign(nr_written, global_page_state(NR_WRITTEN))
++ tp_assign(background_thresh, background_thresh)
++ tp_assign(dirty_thresh, dirty_thresh)
++ tp_assign(dirty_limit, global_dirty_limit)
++ ),
++
++ TP_printk("dirty=%lu writeback=%lu unstable=%lu "
++ "bg_thresh=%lu thresh=%lu limit=%lu "
++ "dirtied=%lu written=%lu",
++ __entry->nr_dirty,
++ __entry->nr_writeback,
++ __entry->nr_unstable,
++ __entry->background_thresh,
++ __entry->dirty_thresh,
++ __entry->dirty_limit,
++ __entry->nr_dirtied,
++ __entry->nr_written
++ )
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
++
++#define KBps(x) ((x) << (PAGE_SHIFT - 10))
++
++TRACE_EVENT_MAP(bdi_dirty_ratelimit,
++
++ writeback_bdi_dirty_ratelimit,
++
++ TP_PROTO(struct backing_dev_info *bdi,
++ unsigned long dirty_rate,
++ unsigned long task_ratelimit),
++
++ TP_ARGS(bdi, dirty_rate, task_ratelimit),
++
++ TP_STRUCT__entry(
++ __array(char, bdi, 32)
++ __field(unsigned long, write_bw)
++ __field(unsigned long, avg_write_bw)
++ __field(unsigned long, dirty_rate)
++ __field(unsigned long, dirty_ratelimit)
++ __field(unsigned long, task_ratelimit)
++ __field(unsigned long, balanced_dirty_ratelimit)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(bdi, dev_name(bdi->dev), 32)
++ tp_assign(write_bw, KBps(bdi->write_bandwidth))
++ tp_assign(avg_write_bw, KBps(bdi->avg_write_bandwidth))
++ tp_assign(dirty_rate, KBps(dirty_rate))
++ tp_assign(dirty_ratelimit, KBps(bdi->dirty_ratelimit))
++ tp_assign(task_ratelimit, KBps(task_ratelimit))
++ tp_assign(balanced_dirty_ratelimit,
++ KBps(bdi->balanced_dirty_ratelimit))
++ ),
++
++ TP_printk("bdi %s: "
++ "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
++ "dirty_ratelimit=%lu task_ratelimit=%lu "
++ "balanced_dirty_ratelimit=%lu",
++ __entry->bdi,
++ __entry->write_bw, /* write bandwidth */
++ __entry->avg_write_bw, /* avg write bandwidth */
++ __entry->dirty_rate, /* bdi dirty rate */
++ __entry->dirty_ratelimit, /* base ratelimit */
++ __entry->task_ratelimit, /* ratelimit with position control */
++ __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
++ )
++)
++
++TRACE_EVENT_MAP(balance_dirty_pages,
++
++ writeback_balance_dirty_pages,
++
++ TP_PROTO(struct backing_dev_info *bdi,
++ unsigned long thresh,
++ unsigned long bg_thresh,
++ unsigned long dirty,
++ unsigned long bdi_thresh,
++ unsigned long bdi_dirty,
++ unsigned long dirty_ratelimit,
++ unsigned long task_ratelimit,
++ unsigned long dirtied,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ unsigned long period,
++#endif
++ long pause,
++ unsigned long start_time),
++
++ TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
++ dirty_ratelimit, task_ratelimit,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ dirtied, period, pause, start_time),
++#else
++ dirtied, pause, start_time),
++#endif
++ TP_STRUCT__entry(
++ __array( char, bdi, 32)
++ __field(unsigned long, limit)
++ __field(unsigned long, setpoint)
++ __field(unsigned long, dirty)
++ __field(unsigned long, bdi_setpoint)
++ __field(unsigned long, bdi_dirty)
++ __field(unsigned long, dirty_ratelimit)
++ __field(unsigned long, task_ratelimit)
++ __field(unsigned int, dirtied)
++ __field(unsigned int, dirtied_pause)
++ __field(unsigned long, paused)
++ __field( long, pause)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ __field(unsigned long, period)
++ __field( long, think)
++#endif
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(bdi, dev_name(bdi->dev), 32)
++ tp_assign(limit, global_dirty_limit)
++ tp_assign(setpoint,
++ (global_dirty_limit + (thresh + bg_thresh) / 2) / 2)
++ tp_assign(dirty, dirty)
++ tp_assign(bdi_setpoint,
++ ((global_dirty_limit + (thresh + bg_thresh) / 2) / 2) *
++ bdi_thresh / (thresh + 1))
++ tp_assign(bdi_dirty, bdi_dirty)
++ tp_assign(dirty_ratelimit, KBps(dirty_ratelimit))
++ tp_assign(task_ratelimit, KBps(task_ratelimit))
++ tp_assign(dirtied, dirtied)
++ tp_assign(dirtied_pause, current->nr_dirtied_pause)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ tp_assign(think, current->dirty_paused_when == 0 ? 0 :
++ (long)(jiffies - current->dirty_paused_when) * 1000/HZ)
++ tp_assign(period, period * 1000 / HZ)
++#endif
++ tp_assign(pause, pause * 1000 / HZ)
++ tp_assign(paused, (jiffies - start_time) * 1000 / HZ)
++ ),
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
++ TP_printk("bdi %s: "
++ "limit=%lu setpoint=%lu dirty=%lu "
++ "bdi_setpoint=%lu bdi_dirty=%lu "
++ "dirty_ratelimit=%lu task_ratelimit=%lu "
++ "dirtied=%u dirtied_pause=%u "
++ "paused=%lu pause=%ld period=%lu think=%ld",
++ __entry->bdi,
++ __entry->limit,
++ __entry->setpoint,
++ __entry->dirty,
++ __entry->bdi_setpoint,
++ __entry->bdi_dirty,
++ __entry->dirty_ratelimit,
++ __entry->task_ratelimit,
++ __entry->dirtied,
++ __entry->dirtied_pause,
++ __entry->paused, /* ms */
++ __entry->pause, /* ms */
++ __entry->period, /* ms */
++ __entry->think /* ms */
++ )
++#else
++ TP_printk("bdi %s: "
++ "limit=%lu setpoint=%lu dirty=%lu "
++ "bdi_setpoint=%lu bdi_dirty=%lu "
++ "dirty_ratelimit=%lu task_ratelimit=%lu "
++ "dirtied=%u dirtied_pause=%u "
++ "paused=%lu pause=%ld",
++ __entry->bdi,
++ __entry->limit,
++ __entry->setpoint,
++ __entry->dirty,
++ __entry->bdi_setpoint,
++ __entry->bdi_dirty,
++ __entry->dirty_ratelimit,
++ __entry->task_ratelimit,
++ __entry->dirtied,
++ __entry->dirtied_pause,
++ __entry->paused, /* ms */
++ __entry->pause /* ms */
++ )
++#endif
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0))
++TRACE_EVENT(writeback_sb_inodes_requeue,
++
++ TP_PROTO(struct inode *inode),
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ __field(unsigned long, ino)
++ __field(unsigned long, state)
++ __field(unsigned long, dirtied_when)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(name, dev_name(inode_to_bdi(inode)->dev), 32)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(state, inode->i_state)
++ tp_assign(dirtied_when, inode->dirtied_when)
++ ),
++
++ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
++ __entry->name,
++ __entry->ino,
++ show_inode_state(__entry->state),
++ __entry->dirtied_when,
++ (jiffies - __entry->dirtied_when) / HZ
++ )
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++DECLARE_EVENT_CLASS(writeback_congest_waited_template,
++
++ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
++
++ TP_ARGS(usec_timeout, usec_delayed),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, usec_timeout )
++ __field( unsigned int, usec_delayed )
++ ),
++
++ TP_fast_assign(
++ tp_assign(usec_timeout, usec_timeout)
++ tp_assign(usec_delayed, usec_delayed)
++ ),
++
++ TP_printk("usec_timeout=%u usec_delayed=%u",
++ __entry->usec_timeout,
++ __entry->usec_delayed)
++)
++
++DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
++
++ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
++
++ TP_ARGS(usec_timeout, usec_delayed)
++)
++
++DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
++
++ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
++
++ TP_ARGS(usec_timeout, usec_delayed)
++)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
++DECLARE_EVENT_CLASS(writeback_single_inode_template,
++
++ TP_PROTO(struct inode *inode,
++ struct writeback_control *wbc,
++ unsigned long nr_to_write
++ ),
++
++ TP_ARGS(inode, wbc, nr_to_write),
++
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ __field(unsigned long, ino)
++ __field(unsigned long, state)
++ __field(unsigned long, dirtied_when)
++ __field(unsigned long, writeback_index)
++ __field(long, nr_to_write)
++ __field(unsigned long, wrote)
++ ),
++
++ TP_fast_assign(
++ tp_memcpy(name, dev_name(inode_to_bdi(inode)->dev), 32)
++ tp_assign(ino, inode->i_ino)
++ tp_assign(state, inode->i_state)
++ tp_assign(dirtied_when, inode->dirtied_when)
++ tp_assign(writeback_index, inode->i_mapping->writeback_index)
++ tp_assign(nr_to_write, nr_to_write)
++ tp_assign(wrote, nr_to_write - wbc->nr_to_write)
++ ),
++
++ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
++ "index=%lu to_write=%ld wrote=%lu",
++ __entry->name,
++ __entry->ino,
++ show_inode_state(__entry->state),
++ __entry->dirtied_when,
++ (jiffies - __entry->dirtied_when) / HZ,
++ __entry->writeback_index,
++ __entry->nr_to_write,
++ __entry->wrote
++ )
++)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0))
++DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
++ TP_PROTO(struct inode *inode,
++ struct writeback_control *wbc,
++ unsigned long nr_to_write),
++ TP_ARGS(inode, wbc, nr_to_write)
++)
++#endif
++
++DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
++ TP_PROTO(struct inode *inode,
++ struct writeback_control *wbc,
++ unsigned long nr_to_write),
++ TP_ARGS(inode, wbc, nr_to_write)
++)
++#endif
++
++#endif /* _TRACE_WRITEBACK_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/arch/x86/kvm/mmutrace.h
+@@ -0,0 +1,285 @@
++#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_KVMMMU_H
++
++#include <linux/tracepoint.h>
++#include <linux/ftrace_event.h>
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM kvmmmu
++
++#define KVM_MMU_PAGE_FIELDS \
++ __field(__u64, gfn) \
++ __field(__u32, role) \
++ __field(__u32, root_count) \
++ __field(bool, unsync)
++
++#define KVM_MMU_PAGE_ASSIGN(sp) \
++ __entry->gfn = sp->gfn; \
++ __entry->role = sp->role.word; \
++ __entry->root_count = sp->root_count; \
++ __entry->unsync = sp->unsync;
++
++#define KVM_MMU_PAGE_PRINTK() ({ \
++ const char *ret = p->buffer + p->len; \
++ static const char *access_str[] = { \
++ "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
++ }; \
++ union kvm_mmu_page_role role; \
++ \
++ role.word = __entry->role; \
++ \
++ trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s" \
++ " %snxe root %u %s%c", \
++ __entry->gfn, role.level, \
++ role.cr4_pae ? " pae" : "", \
++ role.quadrant, \
++ role.direct ? " direct" : "", \
++ access_str[role.access], \
++ role.invalid ? " invalid" : "", \
++ role.nxe ? "" : "!", \
++ __entry->root_count, \
++ __entry->unsync ? "unsync" : "sync", 0); \
++ ret; \
++ })
++
++#define kvm_mmu_trace_pferr_flags \
++ { PFERR_PRESENT_MASK, "P" }, \
++ { PFERR_WRITE_MASK, "W" }, \
++ { PFERR_USER_MASK, "U" }, \
++ { PFERR_RSVD_MASK, "RSVD" }, \
++ { PFERR_FETCH_MASK, "F" }
++
++/*
++ * A pagetable walk has started
++ */
++TRACE_EVENT(
++ kvm_mmu_pagetable_walk,
++ TP_PROTO(u64 addr, u32 pferr),
++ TP_ARGS(addr, pferr),
++
++ TP_STRUCT__entry(
++ __field(__u64, addr)
++ __field(__u32, pferr)
++ ),
++
++ TP_fast_assign(
++ __entry->addr = addr;
++ __entry->pferr = pferr;
++ ),
++
++ TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
++ __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
++);
++
++
++/* We just walked a paging element */
++TRACE_EVENT(
++ kvm_mmu_paging_element,
++ TP_PROTO(u64 pte, int level),
++ TP_ARGS(pte, level),
++
++ TP_STRUCT__entry(
++ __field(__u64, pte)
++ __field(__u32, level)
++ ),
++
++ TP_fast_assign(
++ __entry->pte = pte;
++ __entry->level = level;
++ ),
++
++ TP_printk("pte %llx level %u", __entry->pte, __entry->level)
++);
++
++DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,
++
++ TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
++
++ TP_ARGS(table_gfn, index, size),
++
++ TP_STRUCT__entry(
++ __field(__u64, gpa)
++ ),
++
++ TP_fast_assign(
++ __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
++ + index * size;
++ ),
++
++ TP_printk("gpa %llx", __entry->gpa)
++);
++
++/* We set a pte accessed bit */
++DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,
++
++ TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
++
++ TP_ARGS(table_gfn, index, size)
++);
++
++/* We set a pte dirty bit */
++DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,
++
++ TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
++
++ TP_ARGS(table_gfn, index, size)
++);
++
++TRACE_EVENT(
++ kvm_mmu_walker_error,
++ TP_PROTO(u32 pferr),
++ TP_ARGS(pferr),
++
++ TP_STRUCT__entry(
++ __field(__u32, pferr)
++ ),
++
++ TP_fast_assign(
++ __entry->pferr = pferr;
++ ),
++
++ TP_printk("pferr %x %s", __entry->pferr,
++ __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
++);
++
++TRACE_EVENT(
++ kvm_mmu_get_page,
++ TP_PROTO(struct kvm_mmu_page *sp, bool created),
++ TP_ARGS(sp, created),
++
++ TP_STRUCT__entry(
++ KVM_MMU_PAGE_FIELDS
++ __field(bool, created)
++ ),
++
++ TP_fast_assign(
++ KVM_MMU_PAGE_ASSIGN(sp)
++ __entry->created = created;
++ ),
++
++ TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
++ __entry->created ? "new" : "existing")
++);
++
++DECLARE_EVENT_CLASS(kvm_mmu_page_class,
++
++ TP_PROTO(struct kvm_mmu_page *sp),
++ TP_ARGS(sp),
++
++ TP_STRUCT__entry(
++ KVM_MMU_PAGE_FIELDS
++ ),
++
++ TP_fast_assign(
++ KVM_MMU_PAGE_ASSIGN(sp)
++ ),
++
++ TP_printk("%s", KVM_MMU_PAGE_PRINTK())
++);
++
++DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
++ TP_PROTO(struct kvm_mmu_page *sp),
++
++ TP_ARGS(sp)
++);
++
++DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
++ TP_PROTO(struct kvm_mmu_page *sp),
++
++ TP_ARGS(sp)
++);
++
++DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
++ TP_PROTO(struct kvm_mmu_page *sp),
++
++ TP_ARGS(sp)
++);
++
++TRACE_EVENT(
++ mark_mmio_spte,
++ TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
++ TP_ARGS(sptep, gfn, access),
++
++ TP_STRUCT__entry(
++ __field(void *, sptep)
++ __field(gfn_t, gfn)
++ __field(unsigned, access)
++ ),
++
++ TP_fast_assign(
++ __entry->sptep = sptep;
++ __entry->gfn = gfn;
++ __entry->access = access;
++ ),
++
++ TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
++ __entry->access)
++);
++
++TRACE_EVENT(
++ handle_mmio_page_fault,
++ TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
++ TP_ARGS(addr, gfn, access),
++
++ TP_STRUCT__entry(
++ __field(u64, addr)
++ __field(gfn_t, gfn)
++ __field(unsigned, access)
++ ),
++
++ TP_fast_assign(
++ __entry->addr = addr;
++ __entry->gfn = gfn;
++ __entry->access = access;
++ ),
++
++ TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
++ __entry->access)
++);
++
++#define __spte_satisfied(__spte) \
++ (__entry->retry && is_writable_pte(__entry->__spte))
++
++TRACE_EVENT(
++ fast_page_fault,
++ TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
++ u64 *sptep, u64 old_spte, bool retry),
++ TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
++
++ TP_STRUCT__entry(
++ __field(int, vcpu_id)
++ __field(gva_t, gva)
++ __field(u32, error_code)
++ __field(u64 *, sptep)
++ __field(u64, old_spte)
++ __field(u64, new_spte)
++ __field(bool, retry)
++ ),
++
++ TP_fast_assign(
++ __entry->vcpu_id = vcpu->vcpu_id;
++ __entry->gva = gva;
++ __entry->error_code = error_code;
++ __entry->sptep = sptep;
++ __entry->old_spte = old_spte;
++ __entry->new_spte = *sptep;
++ __entry->retry = retry;
++ ),
++
++ TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
++ " new %llx spurious %d fixed %d", __entry->vcpu_id,
++ __entry->gva, __print_flags(__entry->error_code, "|",
++ kvm_mmu_trace_pferr_flags), __entry->sptep,
++ __entry->old_spte, __entry->new_spte,
++ __spte_satisfied(old_spte), __spte_satisfied(new_spte)
++ )
++);
++#endif /* _TRACE_KVMMMU_H */
++
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE mmutrace
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/arch/x86/kvm/trace.h
+@@ -0,0 +1,828 @@
++#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_KVM_H
++
++#include <linux/tracepoint.h>
++#include <asm/vmx.h>
++#include <asm/svm.h>
++#include <asm/clocksource.h>
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM kvm
++
++/*
++ * Tracepoint for guest mode entry.
++ */
++TRACE_EVENT(kvm_entry,
++ TP_PROTO(unsigned int vcpu_id),
++ TP_ARGS(vcpu_id),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, vcpu_id )
++ ),
++
++ TP_fast_assign(
++ __entry->vcpu_id = vcpu_id;
++ ),
++
++ TP_printk("vcpu %u", __entry->vcpu_id)
++);
++
++/*
++ * Tracepoint for hypercall.
++ */
++TRACE_EVENT(kvm_hypercall,
++ TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
++ unsigned long a2, unsigned long a3),
++ TP_ARGS(nr, a0, a1, a2, a3),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, nr )
++ __field( unsigned long, a0 )
++ __field( unsigned long, a1 )
++ __field( unsigned long, a2 )
++ __field( unsigned long, a3 )
++ ),
++
++ TP_fast_assign(
++ __entry->nr = nr;
++ __entry->a0 = a0;
++ __entry->a1 = a1;
++ __entry->a2 = a2;
++ __entry->a3 = a3;
++ ),
++
++ TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
++ __entry->nr, __entry->a0, __entry->a1, __entry->a2,
++ __entry->a3)
++);
++
++/*
++ * Tracepoint for Hyper-V hypercall.
++ */
++TRACE_EVENT(kvm_hv_hypercall,
++ TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
++ __u64 ingpa, __u64 outgpa),
++ TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),
++
++ TP_STRUCT__entry(
++ __field( __u16, rep_cnt )
++ __field( __u16, rep_idx )
++ __field( __u64, ingpa )
++ __field( __u64, outgpa )
++ __field( __u16, code )
++ __field( bool, fast )
++ ),
++
++ TP_fast_assign(
++ __entry->rep_cnt = rep_cnt;
++ __entry->rep_idx = rep_idx;
++ __entry->ingpa = ingpa;
++ __entry->outgpa = outgpa;
++ __entry->code = code;
++ __entry->fast = fast;
++ ),
++
++ TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
++ __entry->code, __entry->fast ? "fast" : "slow",
++ __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
++ __entry->outgpa)
++);
++
++/*
++ * Tracepoint for PIO.
++ */
++TRACE_EVENT(kvm_pio,
++ TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
++ unsigned int count),
++ TP_ARGS(rw, port, size, count),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, rw )
++ __field( unsigned int, port )
++ __field( unsigned int, size )
++ __field( unsigned int, count )
++ ),
++
++ TP_fast_assign(
++ __entry->rw = rw;
++ __entry->port = port;
++ __entry->size = size;
++ __entry->count = count;
++ ),
++
++ TP_printk("pio_%s at 0x%x size %d count %d",
++ __entry->rw ? "write" : "read",
++ __entry->port, __entry->size, __entry->count)
++);
++
++/*
++ * Tracepoint for cpuid.
++ */
++TRACE_EVENT(kvm_cpuid,
++ TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
++ unsigned long rcx, unsigned long rdx),
++ TP_ARGS(function, rax, rbx, rcx, rdx),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, function )
++ __field( unsigned long, rax )
++ __field( unsigned long, rbx )
++ __field( unsigned long, rcx )
++ __field( unsigned long, rdx )
++ ),
++
++ TP_fast_assign(
++ __entry->function = function;
++ __entry->rax = rax;
++ __entry->rbx = rbx;
++ __entry->rcx = rcx;
++ __entry->rdx = rdx;
++ ),
++
++ TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
++ __entry->function, __entry->rax,
++ __entry->rbx, __entry->rcx, __entry->rdx)
++);
++
++#define AREG(x) { APIC_##x, "APIC_" #x }
++
++#define kvm_trace_symbol_apic \
++ AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \
++ AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \
++ AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
++ AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \
++ AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \
++ AREG(ECTRL)
++/*
++ * Tracepoint for apic access.
++ */
++TRACE_EVENT(kvm_apic,
++ TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
++ TP_ARGS(rw, reg, val),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, rw )
++ __field( unsigned int, reg )
++ __field( unsigned int, val )
++ ),
++
++ TP_fast_assign(
++ __entry->rw = rw;
++ __entry->reg = reg;
++ __entry->val = val;
++ ),
++
++ TP_printk("apic_%s %s = 0x%x",
++ __entry->rw ? "write" : "read",
++ __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
++ __entry->val)
++);
++
++#define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val)
++#define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val)
++
++#define KVM_ISA_VMX 1
++#define KVM_ISA_SVM 2
++
++/*
++ * Tracepoint for kvm guest exit:
++ */
++TRACE_EVENT(kvm_exit,
++ TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
++ TP_ARGS(exit_reason, vcpu, isa),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, exit_reason )
++ __field( unsigned long, guest_rip )
++ __field( u32, isa )
++ __field( u64, info1 )
++ __field( u64, info2 )
++ ),
++
++ TP_fast_assign(
++ __entry->exit_reason = exit_reason;
++ __entry->guest_rip = kvm_rip_read(vcpu);
++ __entry->isa = isa;
++ kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
++ &__entry->info2);
++ ),
++
++ TP_printk("reason %s rip 0x%lx info %llx %llx",
++ (__entry->isa == KVM_ISA_VMX) ?
++ __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
++ __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
++ __entry->guest_rip, __entry->info1, __entry->info2)
++);
++
++/*
++ * Tracepoint for kvm interrupt injection:
++ */
++TRACE_EVENT(kvm_inj_virq,
++ TP_PROTO(unsigned int irq),
++ TP_ARGS(irq),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, irq )
++ ),
++
++ TP_fast_assign(
++ __entry->irq = irq;
++ ),
++
++ TP_printk("irq %u", __entry->irq)
++);
++
++#define EXS(x) { x##_VECTOR, "#" #x }
++
++#define kvm_trace_sym_exc \
++ EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
++ EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
++ EXS(MF), EXS(MC)
++
++/*
++ * Tracepoint for kvm exception injection:
++ */
++TRACE_EVENT(kvm_inj_exception,
++ TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
++ TP_ARGS(exception, has_error, error_code),
++
++ TP_STRUCT__entry(
++ __field( u8, exception )
++ __field( u8, has_error )
++ __field( u32, error_code )
++ ),
++
++ TP_fast_assign(
++ __entry->exception = exception;
++ __entry->has_error = has_error;
++ __entry->error_code = error_code;
++ ),
++
++ TP_printk("%s (0x%x)",
++ __print_symbolic(__entry->exception, kvm_trace_sym_exc),
++ /* FIXME: don't print error_code if not present */
++ __entry->has_error ? __entry->error_code : 0)
++);
++
++/*
++ * Tracepoint for page fault.
++ */
++TRACE_EVENT(kvm_page_fault,
++ TP_PROTO(unsigned long fault_address, unsigned int error_code),
++ TP_ARGS(fault_address, error_code),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, fault_address )
++ __field( unsigned int, error_code )
++ ),
++
++ TP_fast_assign(
++ __entry->fault_address = fault_address;
++ __entry->error_code = error_code;
++ ),
++
++ TP_printk("address %lx error_code %x",
++ __entry->fault_address, __entry->error_code)
++);
++
++/*
++ * Tracepoint for guest MSR access.
++ */
++TRACE_EVENT(kvm_msr,
++ TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
++ TP_ARGS(write, ecx, data, exception),
++
++ TP_STRUCT__entry(
++ __field( unsigned, write )
++ __field( u32, ecx )
++ __field( u64, data )
++ __field( u8, exception )
++ ),
++
++ TP_fast_assign(
++ __entry->write = write;
++ __entry->ecx = ecx;
++ __entry->data = data;
++ __entry->exception = exception;
++ ),
++
++ TP_printk("msr_%s %x = 0x%llx%s",
++ __entry->write ? "write" : "read",
++ __entry->ecx, __entry->data,
++ __entry->exception ? " (#GP)" : "")
++);
++
++#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data, false)
++#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data, false)
++#define trace_kvm_msr_read_ex(ecx) trace_kvm_msr(0, ecx, 0, true)
++#define trace_kvm_msr_write_ex(ecx, data) trace_kvm_msr(1, ecx, data, true)
++
++/*
++ * Tracepoint for guest CR access.
++ */
++TRACE_EVENT(kvm_cr,
++ TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
++ TP_ARGS(rw, cr, val),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, rw )
++ __field( unsigned int, cr )
++ __field( unsigned long, val )
++ ),
++
++ TP_fast_assign(
++ __entry->rw = rw;
++ __entry->cr = cr;
++ __entry->val = val;
++ ),
++
++ TP_printk("cr_%s %x = 0x%lx",
++ __entry->rw ? "write" : "read",
++ __entry->cr, __entry->val)
++);
++
++#define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val)
++#define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val)
++
++TRACE_EVENT(kvm_pic_set_irq,
++ TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
++ TP_ARGS(chip, pin, elcr, imr, coalesced),
++
++ TP_STRUCT__entry(
++ __field( __u8, chip )
++ __field( __u8, pin )
++ __field( __u8, elcr )
++ __field( __u8, imr )
++ __field( bool, coalesced )
++ ),
++
++ TP_fast_assign(
++ __entry->chip = chip;
++ __entry->pin = pin;
++ __entry->elcr = elcr;
++ __entry->imr = imr;
++ __entry->coalesced = coalesced;
++ ),
++
++ TP_printk("chip %u pin %u (%s%s)%s",
++ __entry->chip, __entry->pin,
++ (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
++ (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
++ __entry->coalesced ? " (coalesced)" : "")
++);
++
++#define kvm_apic_dst_shorthand \
++ {0x0, "dst"}, \
++ {0x1, "self"}, \
++ {0x2, "all"}, \
++ {0x3, "all-but-self"}
++
++TRACE_EVENT(kvm_apic_ipi,
++ TP_PROTO(__u32 icr_low, __u32 dest_id),
++ TP_ARGS(icr_low, dest_id),
++
++ TP_STRUCT__entry(
++ __field( __u32, icr_low )
++ __field( __u32, dest_id )
++ ),
++
++ TP_fast_assign(
++ __entry->icr_low = icr_low;
++ __entry->dest_id = dest_id;
++ ),
++
++ TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
++ __entry->dest_id, (u8)__entry->icr_low,
++ __print_symbolic((__entry->icr_low >> 8 & 0x7),
++ kvm_deliver_mode),
++ (__entry->icr_low & (1<<11)) ? "logical" : "physical",
++ (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
++ (__entry->icr_low & (1<<15)) ? "level" : "edge",
++ __print_symbolic((__entry->icr_low >> 18 & 0x3),
++ kvm_apic_dst_shorthand))
++);
++
++TRACE_EVENT(kvm_apic_accept_irq,
++ TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
++ TP_ARGS(apicid, dm, tm, vec, coalesced),
++
++ TP_STRUCT__entry(
++ __field( __u32, apicid )
++ __field( __u16, dm )
++ __field( __u8, tm )
++ __field( __u8, vec )
++ __field( bool, coalesced )
++ ),
++
++ TP_fast_assign(
++ __entry->apicid = apicid;
++ __entry->dm = dm;
++ __entry->tm = tm;
++ __entry->vec = vec;
++ __entry->coalesced = coalesced;
++ ),
++
++ TP_printk("apicid %x vec %u (%s|%s)%s",
++ __entry->apicid, __entry->vec,
++ __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
++ __entry->tm ? "level" : "edge",
++ __entry->coalesced ? " (coalesced)" : "")
++);
++
++TRACE_EVENT(kvm_eoi,
++ TP_PROTO(struct kvm_lapic *apic, int vector),
++ TP_ARGS(apic, vector),
++
++ TP_STRUCT__entry(
++ __field( __u32, apicid )
++ __field( int, vector )
++ ),
++
++ TP_fast_assign(
++ __entry->apicid = apic->vcpu->vcpu_id;
++ __entry->vector = vector;
++ ),
++
++ TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
++);
++
++TRACE_EVENT(kvm_pv_eoi,
++ TP_PROTO(struct kvm_lapic *apic, int vector),
++ TP_ARGS(apic, vector),
++
++ TP_STRUCT__entry(
++ __field( __u32, apicid )
++ __field( int, vector )
++ ),
++
++ TP_fast_assign(
++ __entry->apicid = apic->vcpu->vcpu_id;
++ __entry->vector = vector;
++ ),
++
++ TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
++);
++
++/*
++ * Tracepoint for nested VMRUN
++ */
++TRACE_EVENT(kvm_nested_vmrun,
++ TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
++ __u32 event_inj, bool npt),
++ TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ __field( __u64, vmcb )
++ __field( __u64, nested_rip )
++ __field( __u32, int_ctl )
++ __field( __u32, event_inj )
++ __field( bool, npt )
++ ),
++
++ TP_fast_assign(
++ __entry->rip = rip;
++ __entry->vmcb = vmcb;
++ __entry->nested_rip = nested_rip;
++ __entry->int_ctl = int_ctl;
++ __entry->event_inj = event_inj;
++ __entry->npt = npt;
++ ),
++
++ TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
++ "event_inj: 0x%08x npt: %s",
++ __entry->rip, __entry->vmcb, __entry->nested_rip,
++ __entry->int_ctl, __entry->event_inj,
++ __entry->npt ? "on" : "off")
++);
++
++TRACE_EVENT(kvm_nested_intercepts,
++ TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
++ TP_ARGS(cr_read, cr_write, exceptions, intercept),
++
++ TP_STRUCT__entry(
++ __field( __u16, cr_read )
++ __field( __u16, cr_write )
++ __field( __u32, exceptions )
++ __field( __u64, intercept )
++ ),
++
++ TP_fast_assign(
++ __entry->cr_read = cr_read;
++ __entry->cr_write = cr_write;
++ __entry->exceptions = exceptions;
++ __entry->intercept = intercept;
++ ),
++
++ TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
++ __entry->cr_read, __entry->cr_write, __entry->exceptions,
++ __entry->intercept)
++);
++/*
++ * Tracepoint for #VMEXIT while nested
++ */
++TRACE_EVENT(kvm_nested_vmexit,
++ TP_PROTO(__u64 rip, __u32 exit_code,
++ __u64 exit_info1, __u64 exit_info2,
++ __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
++ TP_ARGS(rip, exit_code, exit_info1, exit_info2,
++ exit_int_info, exit_int_info_err, isa),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ __field( __u32, exit_code )
++ __field( __u64, exit_info1 )
++ __field( __u64, exit_info2 )
++ __field( __u32, exit_int_info )
++ __field( __u32, exit_int_info_err )
++ __field( __u32, isa )
++ ),
++
++ TP_fast_assign(
++ __entry->rip = rip;
++ __entry->exit_code = exit_code;
++ __entry->exit_info1 = exit_info1;
++ __entry->exit_info2 = exit_info2;
++ __entry->exit_int_info = exit_int_info;
++ __entry->exit_int_info_err = exit_int_info_err;
++ __entry->isa = isa;
++ ),
++ TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
++ "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
++ __entry->rip,
++ (__entry->isa == KVM_ISA_VMX) ?
++ __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
++ __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
++ __entry->exit_info1, __entry->exit_info2,
++ __entry->exit_int_info, __entry->exit_int_info_err)
++);
++
++/*
++ * Tracepoint for #VMEXIT reinjected to the guest
++ */
++TRACE_EVENT(kvm_nested_vmexit_inject,
++ TP_PROTO(__u32 exit_code,
++ __u64 exit_info1, __u64 exit_info2,
++ __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
++ TP_ARGS(exit_code, exit_info1, exit_info2,
++ exit_int_info, exit_int_info_err, isa),
++
++ TP_STRUCT__entry(
++ __field( __u32, exit_code )
++ __field( __u64, exit_info1 )
++ __field( __u64, exit_info2 )
++ __field( __u32, exit_int_info )
++ __field( __u32, exit_int_info_err )
++ __field( __u32, isa )
++ ),
++
++ TP_fast_assign(
++ __entry->exit_code = exit_code;
++ __entry->exit_info1 = exit_info1;
++ __entry->exit_info2 = exit_info2;
++ __entry->exit_int_info = exit_int_info;
++ __entry->exit_int_info_err = exit_int_info_err;
++ __entry->isa = isa;
++ ),
++
++ TP_printk("reason: %s ext_inf1: 0x%016llx "
++ "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
++ (__entry->isa == KVM_ISA_VMX) ?
++ __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
++ __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
++ __entry->exit_info1, __entry->exit_info2,
++ __entry->exit_int_info, __entry->exit_int_info_err)
++);
++
++/*
++ * Tracepoint for nested #vmexit because of interrupt pending
++ */
++TRACE_EVENT(kvm_nested_intr_vmexit,
++ TP_PROTO(__u64 rip),
++ TP_ARGS(rip),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ ),
++
++ TP_fast_assign(
++ __entry->rip = rip
++ ),
++
++ TP_printk("rip: 0x%016llx", __entry->rip)
++);
++
++/*
++ * Tracepoint for the INVLPGA instruction
++ */
++TRACE_EVENT(kvm_invlpga,
++ TP_PROTO(__u64 rip, int asid, u64 address),
++ TP_ARGS(rip, asid, address),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ __field( int, asid )
++ __field( __u64, address )
++ ),
++
++ TP_fast_assign(
++ __entry->rip = rip;
++ __entry->asid = asid;
++ __entry->address = address;
++ ),
++
++ TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
++ __entry->rip, __entry->asid, __entry->address)
++);
++
++/*
++ * Tracepoint for the SKINIT instruction
++ */
++TRACE_EVENT(kvm_skinit,
++ TP_PROTO(__u64 rip, __u32 slb),
++ TP_ARGS(rip, slb),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ __field( __u32, slb )
++ ),
++
++ TP_fast_assign(
++ __entry->rip = rip;
++ __entry->slb = slb;
++ ),
++
++ TP_printk("rip: 0x%016llx slb: 0x%08x",
++ __entry->rip, __entry->slb)
++);
++
++#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
++#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
++#define KVM_EMUL_INSN_F_CS_D (1 << 2)
++#define KVM_EMUL_INSN_F_CS_L (1 << 3)
++
++#define kvm_trace_symbol_emul_flags \
++ { 0, "real" }, \
++ { KVM_EMUL_INSN_F_CR0_PE \
++ | KVM_EMUL_INSN_F_EFL_VM, "vm16" }, \
++ { KVM_EMUL_INSN_F_CR0_PE, "prot16" }, \
++ { KVM_EMUL_INSN_F_CR0_PE \
++ | KVM_EMUL_INSN_F_CS_D, "prot32" }, \
++ { KVM_EMUL_INSN_F_CR0_PE \
++ | KVM_EMUL_INSN_F_CS_L, "prot64" }
++
++#define kei_decode_mode(mode) ({ \
++ u8 flags = 0xff; \
++ switch (mode) { \
++ case X86EMUL_MODE_REAL: \
++ flags = 0; \
++ break; \
++ case X86EMUL_MODE_VM86: \
++ flags = KVM_EMUL_INSN_F_EFL_VM; \
++ break; \
++ case X86EMUL_MODE_PROT16: \
++ flags = KVM_EMUL_INSN_F_CR0_PE; \
++ break; \
++ case X86EMUL_MODE_PROT32: \
++ flags = KVM_EMUL_INSN_F_CR0_PE \
++ | KVM_EMUL_INSN_F_CS_D; \
++ break; \
++ case X86EMUL_MODE_PROT64: \
++ flags = KVM_EMUL_INSN_F_CR0_PE \
++ | KVM_EMUL_INSN_F_CS_L; \
++ break; \
++ } \
++ flags; \
++ })
++
++TRACE_EVENT(kvm_emulate_insn,
++ TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
++ TP_ARGS(vcpu, failed),
++
++ TP_STRUCT__entry(
++ __field( __u64, rip )
++ __field( __u32, csbase )
++ __field( __u8, len )
++ __array( __u8, insn, 15 )
++ __field( __u8, flags )
++ __field( __u8, failed )
++ ),
++
++ TP_fast_assign(
++ __entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
++ __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
++ __entry->len = vcpu->arch.emulate_ctxt._eip
++ - vcpu->arch.emulate_ctxt.fetch.start;
++ memcpy(__entry->insn,
++ vcpu->arch.emulate_ctxt.fetch.data,
++ 15);
++ __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
++ __entry->failed = failed;
++ ),
++
++ TP_printk("%x:%llx:%s (%s)%s",
++ __entry->csbase, __entry->rip,
++ __print_hex(__entry->insn, __entry->len),
++ __print_symbolic(__entry->flags,
++ kvm_trace_symbol_emul_flags),
++ __entry->failed ? " failed" : ""
++ )
++ );
++
++#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
++#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
++
++TRACE_EVENT(
++ vcpu_match_mmio,
++ TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
++ TP_ARGS(gva, gpa, write, gpa_match),
++
++ TP_STRUCT__entry(
++ __field(gva_t, gva)
++ __field(gpa_t, gpa)
++ __field(bool, write)
++ __field(bool, gpa_match)
++ ),
++
++ TP_fast_assign(
++ __entry->gva = gva;
++ __entry->gpa = gpa;
++ __entry->write = write;
++ __entry->gpa_match = gpa_match
++ ),
++
++ TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
++ __entry->write ? "Write" : "Read",
++ __entry->gpa_match ? "GPA" : "GVA")
++);
++
++#ifdef CONFIG_X86_64
++
++#define host_clocks \
++ {VCLOCK_NONE, "none"}, \
++ {VCLOCK_TSC, "tsc"}, \
++ {VCLOCK_HPET, "hpet"} \
++
++TRACE_EVENT(kvm_update_master_clock,
++ TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
++ TP_ARGS(use_master_clock, host_clock, offset_matched),
++
++ TP_STRUCT__entry(
++ __field( bool, use_master_clock )
++ __field( unsigned int, host_clock )
++ __field( bool, offset_matched )
++ ),
++
++ TP_fast_assign(
++ __entry->use_master_clock = use_master_clock;
++ __entry->host_clock = host_clock;
++ __entry->offset_matched = offset_matched;
++ ),
++
++ TP_printk("masterclock %d hostclock %s offsetmatched %u",
++ __entry->use_master_clock,
++ __print_symbolic(__entry->host_clock, host_clocks),
++ __entry->offset_matched)
++);
++
++TRACE_EVENT(kvm_track_tsc,
++ TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
++ unsigned int online_vcpus, bool use_master_clock,
++ unsigned int host_clock),
++ TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
++ host_clock),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, vcpu_id )
++ __field( unsigned int, nr_vcpus_matched_tsc )
++ __field( unsigned int, online_vcpus )
++ __field( bool, use_master_clock )
++ __field( unsigned int, host_clock )
++ ),
++
++ TP_fast_assign(
++ __entry->vcpu_id = vcpu_id;
++ __entry->nr_vcpus_matched_tsc = nr_matched;
++ __entry->online_vcpus = online_vcpus;
++ __entry->use_master_clock = use_master_clock;
++ __entry->host_clock = host_clock;
++ ),
++
++ TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
++ " hostclock %s",
++ __entry->vcpu_id, __entry->use_master_clock,
++ __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
++ __print_symbolic(__entry->host_clock, host_clocks))
++);
++
++#endif /* CONFIG_X86_64 */
++
++#endif /* _TRACE_KVM_H */
++
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH arch/x86/kvm
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE trace
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/asoc.h
+@@ -0,0 +1,410 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM asoc
++
++#if !defined(_TRACE_ASOC_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_ASOC_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++
++#define DAPM_DIRECT "(direct)"
++
++struct snd_soc_jack;
++struct snd_soc_codec;
++struct snd_soc_platform;
++struct snd_soc_card;
++struct snd_soc_dapm_widget;
++
++/*
++ * Log register events
++ */
++DECLARE_EVENT_CLASS(snd_soc_reg,
++
++ TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(codec, reg, val),
++
++ TP_STRUCT__entry(
++ __string( name, codec->name )
++ __field( int, id )
++ __field( unsigned int, reg )
++ __field( unsigned int, val )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, codec->name);
++ __entry->id = codec->id;
++ __entry->reg = reg;
++ __entry->val = val;
++ ),
++
++ TP_printk("codec=%s.%d reg=%x val=%x", __get_str(name),
++ (int)__entry->id, (unsigned int)__entry->reg,
++ (unsigned int)__entry->val)
++);
++
++DEFINE_EVENT(snd_soc_reg, snd_soc_reg_write,
++
++ TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(codec, reg, val)
++
++);
++
++DEFINE_EVENT(snd_soc_reg, snd_soc_reg_read,
++
++ TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(codec, reg, val)
++
++);
++
++DECLARE_EVENT_CLASS(snd_soc_preg,
++
++ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(platform, reg, val),
++
++ TP_STRUCT__entry(
++ __string( name, platform->name )
++ __field( int, id )
++ __field( unsigned int, reg )
++ __field( unsigned int, val )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, platform->name);
++ __entry->id = platform->id;
++ __entry->reg = reg;
++ __entry->val = val;
++ ),
++
++ TP_printk("platform=%s.%d reg=%x val=%x", __get_str(name),
++ (int)__entry->id, (unsigned int)__entry->reg,
++ (unsigned int)__entry->val)
++);
++
++DEFINE_EVENT(snd_soc_preg, snd_soc_preg_write,
++
++ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(platform, reg, val)
++
++);
++
++DEFINE_EVENT(snd_soc_preg, snd_soc_preg_read,
++
++ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(platform, reg, val)
++
++);
++
++DECLARE_EVENT_CLASS(snd_soc_card,
++
++ TP_PROTO(struct snd_soc_card *card, int val),
++
++ TP_ARGS(card, val),
++
++ TP_STRUCT__entry(
++ __string( name, card->name )
++ __field( int, val )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, card->name);
++ __entry->val = val;
++ ),
++
++ TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val)
++);
++
++DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start,
++
++ TP_PROTO(struct snd_soc_card *card, int val),
++
++ TP_ARGS(card, val)
++
++);
++
++DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done,
++
++ TP_PROTO(struct snd_soc_card *card, int val),
++
++ TP_ARGS(card, val)
++
++);
++
++DECLARE_EVENT_CLASS(snd_soc_dapm_basic,
++
++ TP_PROTO(struct snd_soc_card *card),
++
++ TP_ARGS(card),
++
++ TP_STRUCT__entry(
++ __string( name, card->name )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, card->name);
++ ),
++
++ TP_printk("card=%s", __get_str(name))
++);
++
++DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start,
++
++ TP_PROTO(struct snd_soc_card *card),
++
++ TP_ARGS(card)
++
++);
++
++DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done,
++
++ TP_PROTO(struct snd_soc_card *card),
++
++ TP_ARGS(card)
++
++);
++
++DECLARE_EVENT_CLASS(snd_soc_dapm_widget,
++
++ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
++
++ TP_ARGS(w, val),
++
++ TP_STRUCT__entry(
++ __string( name, w->name )
++ __field( int, val )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, w->name);
++ __entry->val = val;
++ ),
++
++ TP_printk("widget=%s val=%d", __get_str(name),
++ (int)__entry->val)
++);
++
++DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_power,
++
++ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
++
++ TP_ARGS(w, val)
++
++);
++
++DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_start,
++
++ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
++
++ TP_ARGS(w, val)
++
++);
++
++DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done,
++
++ TP_PROTO(struct snd_soc_dapm_widget *w, int val),
++
++ TP_ARGS(w, val)
++
++);
++
++TRACE_EVENT(snd_soc_dapm_walk_done,
++
++ TP_PROTO(struct snd_soc_card *card),
++
++ TP_ARGS(card),
++
++ TP_STRUCT__entry(
++ __string( name, card->name )
++ __field( int, power_checks )
++ __field( int, path_checks )
++ __field( int, neighbour_checks )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, card->name);
++ __entry->power_checks = card->dapm_stats.power_checks;
++ __entry->path_checks = card->dapm_stats.path_checks;
++ __entry->neighbour_checks = card->dapm_stats.neighbour_checks;
++ ),
++
++ TP_printk("%s: checks %d power, %d path, %d neighbour",
++ __get_str(name), (int)__entry->power_checks,
++ (int)__entry->path_checks, (int)__entry->neighbour_checks)
++);
++
++TRACE_EVENT(snd_soc_dapm_output_path,
++
++ TP_PROTO(struct snd_soc_dapm_widget *widget,
++ struct snd_soc_dapm_path *path),
++
++ TP_ARGS(widget, path),
++
++ TP_STRUCT__entry(
++ __string( wname, widget->name )
++ __string( pname, path->name ? path->name : DAPM_DIRECT)
++ __string( psname, path->sink->name )
++ __field( int, path_sink )
++ __field( int, path_connect )
++ ),
++
++ TP_fast_assign(
++ __assign_str(wname, widget->name);
++ __assign_str(pname, path->name ? path->name : DAPM_DIRECT);
++ __assign_str(psname, path->sink->name);
++ __entry->path_connect = path->connect;
++ __entry->path_sink = (long)path->sink;
++ ),
++
++ TP_printk("%c%s -> %s -> %s\n",
++ (int) __entry->path_sink &&
++ (int) __entry->path_connect ? '*' : ' ',
++ __get_str(wname), __get_str(pname), __get_str(psname))
++);
++
++TRACE_EVENT(snd_soc_dapm_input_path,
++
++ TP_PROTO(struct snd_soc_dapm_widget *widget,
++ struct snd_soc_dapm_path *path),
++
++ TP_ARGS(widget, path),
++
++ TP_STRUCT__entry(
++ __string( wname, widget->name )
++ __string( pname, path->name ? path->name : DAPM_DIRECT)
++ __string( psname, path->source->name )
++ __field( int, path_source )
++ __field( int, path_connect )
++ ),
++
++ TP_fast_assign(
++ __assign_str(wname, widget->name);
++ __assign_str(pname, path->name ? path->name : DAPM_DIRECT);
++ __assign_str(psname, path->source->name);
++ __entry->path_connect = path->connect;
++ __entry->path_source = (long)path->source;
++ ),
++
++ TP_printk("%c%s <- %s <- %s\n",
++ (int) __entry->path_source &&
++ (int) __entry->path_connect ? '*' : ' ',
++ __get_str(wname), __get_str(pname), __get_str(psname))
++);
++
++TRACE_EVENT(snd_soc_dapm_connected,
++
++ TP_PROTO(int paths, int stream),
++
++ TP_ARGS(paths, stream),
++
++ TP_STRUCT__entry(
++ __field( int, paths )
++ __field( int, stream )
++ ),
++
++ TP_fast_assign(
++ __entry->paths = paths;
++ __entry->stream = stream;
++ ),
++
++ TP_printk("%s: found %d paths\n",
++ __entry->stream ? "capture" : "playback", __entry->paths)
++);
++
++TRACE_EVENT(snd_soc_jack_irq,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, name);
++ ),
++
++ TP_printk("%s", __get_str(name))
++);
++
++TRACE_EVENT(snd_soc_jack_report,
++
++ TP_PROTO(struct snd_soc_jack *jack, int mask, int val),
++
++ TP_ARGS(jack, mask, val),
++
++ TP_STRUCT__entry(
++ __string( name, jack->jack->name )
++ __field( int, mask )
++ __field( int, val )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, jack->jack->name);
++ __entry->mask = mask;
++ __entry->val = val;
++ ),
++
++ TP_printk("jack=%s %x/%x", __get_str(name), (int)__entry->val,
++ (int)__entry->mask)
++);
++
++TRACE_EVENT(snd_soc_jack_notify,
++
++ TP_PROTO(struct snd_soc_jack *jack, int val),
++
++ TP_ARGS(jack, val),
++
++ TP_STRUCT__entry(
++ __string( name, jack->jack->name )
++ __field( int, val )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, jack->jack->name);
++ __entry->val = val;
++ ),
++
++ TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
++);
++
++TRACE_EVENT(snd_soc_cache_sync,
++
++ TP_PROTO(struct snd_soc_codec *codec, const char *type,
++ const char *status),
++
++ TP_ARGS(codec, type, status),
++
++ TP_STRUCT__entry(
++ __string( name, codec->name )
++ __string( status, status )
++ __string( type, type )
++ __field( int, id )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, codec->name);
++ __assign_str(status, status);
++ __assign_str(type, type);
++ __entry->id = codec->id;
++ ),
++
++ TP_printk("codec=%s.%d type=%s status=%s", __get_str(name),
++ (int)__entry->id, __get_str(type), __get_str(status))
++);
++
++#endif /* _TRACE_ASOC_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/block.h
+@@ -0,0 +1,571 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM block
++
++#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_BLOCK_H
++
++#include <linux/blktrace_api.h>
++#include <linux/blkdev.h>
++#include <linux/tracepoint.h>
++
++#define RWBS_LEN 8
++
++DECLARE_EVENT_CLASS(block_rq_with_error,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( int, errors )
++ __array( char, rwbs, RWBS_LEN )
++ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
++ __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ 0 : blk_rq_pos(rq);
++ __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ 0 : blk_rq_sectors(rq);
++ __entry->errors = rq->errors;
++
++ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
++ blk_dump_cmd(__get_str(cmd), rq);
++ ),
++
++ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->rwbs, __get_str(cmd),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->errors)
++);
++
++/**
++ * block_rq_abort - abort block operation request
++ * @q: queue containing the block operation request
++ * @rq: block IO operation request
++ *
++ * Called immediately after pending block IO operation request @rq in
++ * queue @q is aborted. The fields in the operation request @rq
++ * can be examined to determine which device and sectors the pending
++ * operation would access.
++ */
++DEFINE_EVENT(block_rq_with_error, block_rq_abort,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq)
++);
++
++/**
++ * block_rq_requeue - place block IO request back on a queue
++ * @q: queue holding operation
++ * @rq: block IO operation request
++ *
++ * The block operation request @rq is being placed back into queue
++ * @q. For some reason the request was not completed and needs to be
++ * put back in the queue.
++ */
++DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq)
++);
++
++/**
++ * block_rq_complete - block IO operation completed by device driver
++ * @q: queue containing the block operation request
++ * @rq: block operations request
++ *
++ * The block_rq_complete tracepoint event indicates that some portion
++ * of the operation request has been completed by the device driver.  If
++ * the @rq->bio is %NULL, then there is absolutely no additional work to
++ * do for the request. If @rq->bio is non-NULL then there is
++ * additional work required to complete the request.
++ */
++DEFINE_EVENT(block_rq_with_error, block_rq_complete,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq)
++);
++
++DECLARE_EVENT_CLASS(block_rq,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( unsigned int, bytes )
++ __array( char, rwbs, RWBS_LEN )
++ __array( char, comm, TASK_COMM_LEN )
++ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
++ __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ 0 : blk_rq_pos(rq);
++ __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ 0 : blk_rq_sectors(rq);
++ __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
++ blk_rq_bytes(rq) : 0;
++
++ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
++ blk_dump_cmd(__get_str(cmd), rq);
++ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
++ ),
++
++ TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->rwbs, __entry->bytes, __get_str(cmd),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->comm)
++);
++
++/**
++ * block_rq_insert - insert block operation request into queue
++ * @q: target queue
++ * @rq: block IO operation request
++ *
++ * Called immediately before block operation request @rq is inserted
++ * into queue @q. The fields in the operation request @rq struct can
++ * be examined to determine which device and sectors the pending
++ * operation would access.
++ */
++DEFINE_EVENT(block_rq, block_rq_insert,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq)
++);
++
++/**
++ * block_rq_issue - issue pending block IO request operation to device driver
++ * @q: queue holding operation
++ * @rq: block IO operation request
++ *
++ * Called when block operation request @rq from queue @q is sent to a
++ * device driver for processing.
++ */
++DEFINE_EVENT(block_rq, block_rq_issue,
++
++ TP_PROTO(struct request_queue *q, struct request *rq),
++
++ TP_ARGS(q, rq)
++);
++
++/**
++ * block_bio_bounce - used bounce buffer when processing block operation
++ * @q: queue holding the block operation
++ * @bio: block operation
++ *
++ * A bounce buffer was used to handle the block operation @bio in @q.
++ * This occurs when hardware limitations prevent a direct transfer of
++ * data between the @bio data memory area and the IO device. Use of a
++ * bounce buffer requires extra copying of data and decreases
++ * performance.
++ */
++TRACE_EVENT(block_bio_bounce,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __array( char, rwbs, RWBS_LEN )
++ __array( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = bio->bi_bdev ?
++ bio->bi_bdev->bd_dev : 0;
++ __entry->sector = bio->bi_sector;
++ __entry->nr_sector = bio->bi_size >> 9;
++ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
++ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
++ ),
++
++ TP_printk("%d,%d %s %llu + %u [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->comm)
++);
++
++/**
++ * block_bio_complete - completed all work on the block operation
++ * @q: queue holding the block operation
++ * @bio: block operation completed
++ * @error: io error value
++ *
++ * This tracepoint indicates there is no further work to do on this
++ * block IO operation @bio.
++ */
++TRACE_EVENT(block_bio_complete,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio, int error),
++
++ TP_ARGS(q, bio, error),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned, nr_sector )
++ __field( int, error )
++ __array( char, rwbs, RWBS_LEN)
++ ),
++
++ TP_fast_assign(
++ __entry->dev = bio->bi_bdev->bd_dev;
++ __entry->sector = bio->bi_sector;
++ __entry->nr_sector = bio->bi_size >> 9;
++ __entry->error = error;
++ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
++ ),
++
++ TP_printk("%d,%d %s %llu + %u [%d]",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->error)
++);
++
++DECLARE_EVENT_CLASS(block_bio,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __array( char, rwbs, RWBS_LEN )
++ __array( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = bio->bi_bdev->bd_dev;
++ __entry->sector = bio->bi_sector;
++ __entry->nr_sector = bio->bi_size >> 9;
++ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
++ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
++ ),
++
++ TP_printk("%d,%d %s %llu + %u [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->comm)
++);
++
++/**
++ * block_bio_backmerge - merging block operation to the end of an existing operation
++ * @q: queue holding operation
++ * @bio: new block operation to merge
++ *
++ * Merging block request @bio to the end of an existing block request
++ * in queue @q.
++ */
++DEFINE_EVENT(block_bio, block_bio_backmerge,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio)
++);
++
++/**
++ * block_bio_frontmerge - merging block operation to the beginning of an existing operation
++ * @q: queue holding operation
++ * @bio: new block operation to merge
++ *
++ * Merging block IO operation @bio to the beginning of an existing block
++ * operation in queue @q.
++ */
++DEFINE_EVENT(block_bio, block_bio_frontmerge,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio)
++);
++
++/**
++ * block_bio_queue - putting new block IO operation in queue
++ * @q: queue holding operation
++ * @bio: new block operation
++ *
++ * About to place the block IO operation @bio into queue @q.
++ */
++DEFINE_EVENT(block_bio, block_bio_queue,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio),
++
++ TP_ARGS(q, bio)
++);
++
++DECLARE_EVENT_CLASS(block_get_rq,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
++
++ TP_ARGS(q, bio, rw),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __array( char, rwbs, RWBS_LEN )
++ __array( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
++ __entry->sector = bio ? bio->bi_sector : 0;
++ __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
++ blk_fill_rwbs(__entry->rwbs,
++ bio ? bio->bi_rw : 0, __entry->nr_sector);
++ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
++ ),
++
++ TP_printk("%d,%d %s %llu + %u [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->comm)
++);
++
++/**
++ * block_getrq - get a free request entry in queue for block IO operations
++ * @q: queue for operations
++ * @bio: pending block IO operation
++ * @rw: low bit indicates a read (%0) or a write (%1)
++ *
++ * A request struct for queue @q has been allocated to handle the
++ * block IO operation @bio.
++ */
++DEFINE_EVENT(block_get_rq, block_getrq,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
++
++ TP_ARGS(q, bio, rw)
++);
++
++/**
++ * block_sleeprq - waiting to get a free request entry in queue for block IO operation
++ * @q: queue for operation
++ * @bio: pending block IO operation
++ * @rw: low bit indicates a read (%0) or a write (%1)
++ *
++ * In the case where a request struct cannot be provided for queue @q
++ * the process needs to wait for a request struct to become
++ * available.  This tracepoint event is generated each time the
++ * process goes to sleep waiting for a request struct to become available.
++ */
++DEFINE_EVENT(block_get_rq, block_sleeprq,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
++
++ TP_ARGS(q, bio, rw)
++);
++
++/**
++ * block_plug - keep operations requests in request queue
++ * @q: request queue to plug
++ *
++ * Plug the request queue @q. Do not allow block operation requests
++ * to be sent to the device driver. Instead, accumulate requests in
++ * the queue to improve throughput performance of the block device.
++ */
++TRACE_EVENT(block_plug,
++
++ TP_PROTO(struct request_queue *q),
++
++ TP_ARGS(q),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
++ ),
++
++ TP_printk("[%s]", __entry->comm)
++);
++
++DECLARE_EVENT_CLASS(block_unplug,
++
++ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
++
++ TP_ARGS(q, depth, explicit),
++
++ TP_STRUCT__entry(
++ __field( int, nr_rq )
++ __array( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ __entry->nr_rq = depth;
++ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
++ ),
++
++ TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
++);
++
++/**
++ * block_unplug - release of operations requests in request queue
++ * @q: request queue to unplug
++ * @depth: number of requests just added to the queue
++ * @explicit: whether this was an explicit unplug, or one from schedule()
++ *
++ * Unplug request queue @q because the device driver is scheduled to work
++ * on elements in the request queue.
++ */
++DEFINE_EVENT(block_unplug, block_unplug,
++
++ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
++
++ TP_ARGS(q, depth, explicit)
++);
++
++/**
++ * block_split - split a single bio struct into two bio structs
++ * @q: queue containing the bio
++ * @bio: block operation being split
++ * @new_sector: The starting sector for the new bio
++ *
++ * The bio request @bio in request queue @q needs to be split into two
++ * bio requests. The newly created @bio request starts at
++ * @new_sector. This split may be required due to hardware limitations
++ * such as an operation crossing device boundaries in a RAID system.
++ */
++TRACE_EVENT(block_split,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio,
++ unsigned int new_sector),
++
++ TP_ARGS(q, bio, new_sector),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( sector_t, new_sector )
++ __array( char, rwbs, RWBS_LEN )
++ __array( char, comm, TASK_COMM_LEN )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = bio->bi_bdev->bd_dev;
++ __entry->sector = bio->bi_sector;
++ __entry->new_sector = new_sector;
++ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
++ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
++ ),
++
++ TP_printk("%d,%d %s %llu / %llu [%s]",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
++ (unsigned long long)__entry->sector,
++ (unsigned long long)__entry->new_sector,
++ __entry->comm)
++);
++
++/**
++ * block_bio_remap - map request for a logical device to the raw device
++ * @q: queue holding the operation
++ * @bio: revised operation
++ * @dev: device for the operation
++ * @from: original sector for the operation
++ *
++ * An operation for a logical device has been mapped to the
++ * raw block device.
++ */
++TRACE_EVENT(block_bio_remap,
++
++ TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
++ sector_t from),
++
++ TP_ARGS(q, bio, dev, from),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( dev_t, old_dev )
++ __field( sector_t, old_sector )
++ __array( char, rwbs, RWBS_LEN)
++ ),
++
++ TP_fast_assign(
++ __entry->dev = bio->bi_bdev->bd_dev;
++ __entry->sector = bio->bi_sector;
++ __entry->nr_sector = bio->bi_size >> 9;
++ __entry->old_dev = dev;
++ __entry->old_sector = from;
++ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
++ ),
++
++ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector,
++ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
++ (unsigned long long)__entry->old_sector)
++);
++
++/**
++ * block_rq_remap - map request for a block operation request
++ * @q: queue holding the operation
++ * @rq: block IO operation request
++ * @dev: device for the operation
++ * @from: original sector for the operation
++ *
++ * The block operation request @rq in @q has been remapped. The block
++ * operation request @rq holds the current information and @from holds
++ * the original sector.
++ */
++TRACE_EVENT(block_rq_remap,
++
++ TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
++ sector_t from),
++
++ TP_ARGS(q, rq, dev, from),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( dev_t, old_dev )
++ __field( sector_t, old_sector )
++ __array( char, rwbs, RWBS_LEN)
++ ),
++
++ TP_fast_assign(
++ __entry->dev = disk_devt(rq->rq_disk);
++ __entry->sector = blk_rq_pos(rq);
++ __entry->nr_sector = blk_rq_sectors(rq);
++ __entry->old_dev = dev;
++ __entry->old_sector = from;
++ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
++ ),
++
++ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector,
++ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
++ (unsigned long long)__entry->old_sector)
++);
++
++#endif /* _TRACE_BLOCK_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
++
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/btrfs.h
+@@ -0,0 +1,918 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM btrfs
++
++#if !defined(_TRACE_BTRFS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_BTRFS_H
++
++#include <linux/writeback.h>
++#include <linux/tracepoint.h>
++#include <trace/events/gfpflags.h>
++
++struct btrfs_root;
++struct btrfs_fs_info;
++struct btrfs_inode;
++struct extent_map;
++struct btrfs_ordered_extent;
++struct btrfs_delayed_ref_node;
++struct btrfs_delayed_tree_ref;
++struct btrfs_delayed_data_ref;
++struct btrfs_delayed_ref_head;
++struct btrfs_block_group_cache;
++struct btrfs_free_cluster;
++struct map_lookup;
++struct extent_buffer;
++
++#define show_ref_type(type) \
++ __print_symbolic(type, \
++ { BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
++ { BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \
++ { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \
++ { BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \
++ { BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
++
++#define __show_root_type(obj) \
++ __print_symbolic_u64(obj, \
++ { BTRFS_ROOT_TREE_OBJECTID, "ROOT_TREE" }, \
++ { BTRFS_EXTENT_TREE_OBJECTID, "EXTENT_TREE" }, \
++ { BTRFS_CHUNK_TREE_OBJECTID, "CHUNK_TREE" }, \
++ { BTRFS_DEV_TREE_OBJECTID, "DEV_TREE" }, \
++ { BTRFS_FS_TREE_OBJECTID, "FS_TREE" }, \
++ { BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \
++ { BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \
++ { BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
++ { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
++ { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
++
++#define show_root_type(obj) \
++ obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \
++ (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-"
++
++#define BTRFS_GROUP_FLAGS \
++ { BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
++ { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
++ { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
++ { BTRFS_BLOCK_GROUP_RAID0, "RAID0"}, \
++ { BTRFS_BLOCK_GROUP_RAID1, "RAID1"}, \
++ { BTRFS_BLOCK_GROUP_DUP, "DUP"}, \
++ { BTRFS_BLOCK_GROUP_RAID10, "RAID10"}
++
++#define BTRFS_UUID_SIZE 16
++
++TRACE_EVENT(btrfs_transaction_commit,
++
++ TP_PROTO(struct btrfs_root *root),
++
++ TP_ARGS(root),
++
++ TP_STRUCT__entry(
++ __field( u64, generation )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ __entry->generation = root->fs_info->generation;
++ __entry->root_objectid = root->root_key.objectid;
++ ),
++
++ TP_printk("root = %llu(%s), gen = %llu",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->generation)
++);
++
++DECLARE_EVENT_CLASS(btrfs__inode,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( blkcnt_t, blocks )
++ __field( u64, disk_i_size )
++ __field( u64, generation )
++ __field( u64, last_trans )
++ __field( u64, logged_trans )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = inode->i_ino;
++ __entry->blocks = inode->i_blocks;
++ __entry->disk_i_size = BTRFS_I(inode)->disk_i_size;
++ __entry->generation = BTRFS_I(inode)->generation;
++ __entry->last_trans = BTRFS_I(inode)->last_trans;
++ __entry->logged_trans = BTRFS_I(inode)->logged_trans;
++ __entry->root_objectid =
++ BTRFS_I(inode)->root->root_key.objectid;
++ ),
++
++ TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
++ "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->generation,
++ (unsigned long)__entry->ino,
++ (unsigned long long)__entry->blocks,
++ (unsigned long long)__entry->disk_i_size,
++ (unsigned long long)__entry->last_trans,
++ (unsigned long long)__entry->logged_trans)
++);
++
++DEFINE_EVENT(btrfs__inode, btrfs_inode_new,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++);
++
++DEFINE_EVENT(btrfs__inode, btrfs_inode_request,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++);
++
++DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++);
++
++#define __show_map_type(type) \
++ __print_symbolic_u64(type, \
++ { EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
++ { EXTENT_MAP_HOLE, "HOLE" }, \
++ { EXTENT_MAP_INLINE, "INLINE" }, \
++ { EXTENT_MAP_DELALLOC, "DELALLOC" })
++
++#define show_map_type(type) \
++ type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type)
++
++#define show_map_flags(flag) \
++ __print_flags(flag, "|", \
++ { EXTENT_FLAG_PINNED, "PINNED" }, \
++ { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \
++ { EXTENT_FLAG_VACANCY, "VACANCY" }, \
++ { EXTENT_FLAG_PREALLOC, "PREALLOC" })
++
++TRACE_EVENT(btrfs_get_extent,
++
++ TP_PROTO(struct btrfs_root *root, struct extent_map *map),
++
++ TP_ARGS(root, map),
++
++ TP_STRUCT__entry(
++ __field( u64, root_objectid )
++ __field( u64, start )
++ __field( u64, len )
++ __field( u64, orig_start )
++ __field( u64, block_start )
++ __field( u64, block_len )
++ __field( unsigned long, flags )
++ __field( int, refs )
++ __field( unsigned int, compress_type )
++ ),
++
++ TP_fast_assign(
++ __entry->root_objectid = root->root_key.objectid;
++ __entry->start = map->start;
++ __entry->len = map->len;
++ __entry->orig_start = map->orig_start;
++ __entry->block_start = map->block_start;
++ __entry->block_len = map->block_len;
++ __entry->flags = map->flags;
++ __entry->refs = atomic_read(&map->refs);
++ __entry->compress_type = map->compress_type;
++ ),
++
++ TP_printk("root = %llu(%s), start = %llu, len = %llu, "
++ "orig_start = %llu, block_start = %llu(%s), "
++ "block_len = %llu, flags = %s, refs = %u, "
++ "compress_type = %u",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->start,
++ (unsigned long long)__entry->len,
++ (unsigned long long)__entry->orig_start,
++ show_map_type(__entry->block_start),
++ (unsigned long long)__entry->block_len,
++ show_map_flags(__entry->flags),
++ __entry->refs, __entry->compress_type)
++);
++
++#define show_ordered_flags(flags) \
++ __print_symbolic(flags, \
++ { BTRFS_ORDERED_IO_DONE, "IO_DONE" }, \
++ { BTRFS_ORDERED_COMPLETE, "COMPLETE" }, \
++ { BTRFS_ORDERED_NOCOW, "NOCOW" }, \
++ { BTRFS_ORDERED_COMPRESSED, "COMPRESSED" }, \
++ { BTRFS_ORDERED_PREALLOC, "PREALLOC" }, \
++ { BTRFS_ORDERED_DIRECT, "DIRECT" })
++
++DECLARE_EVENT_CLASS(btrfs__ordered_extent,
++
++ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
++
++ TP_ARGS(inode, ordered),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( u64, file_offset )
++ __field( u64, start )
++ __field( u64, len )
++ __field( u64, disk_len )
++ __field( u64, bytes_left )
++ __field( unsigned long, flags )
++ __field( int, compress_type )
++ __field( int, refs )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = inode->i_ino;
++ __entry->file_offset = ordered->file_offset;
++ __entry->start = ordered->start;
++ __entry->len = ordered->len;
++ __entry->disk_len = ordered->disk_len;
++ __entry->bytes_left = ordered->bytes_left;
++ __entry->flags = ordered->flags;
++ __entry->compress_type = ordered->compress_type;
++ __entry->refs = atomic_read(&ordered->refs);
++ __entry->root_objectid =
++ BTRFS_I(inode)->root->root_key.objectid;
++ ),
++
++ TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, "
++ "start = %llu, len = %llu, disk_len = %llu, "
++ "bytes_left = %llu, flags = %s, compress_type = %d, "
++ "refs = %d",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->ino,
++ (unsigned long long)__entry->file_offset,
++ (unsigned long long)__entry->start,
++ (unsigned long long)__entry->len,
++ (unsigned long long)__entry->disk_len,
++ (unsigned long long)__entry->bytes_left,
++ show_ordered_flags(__entry->flags),
++ __entry->compress_type, __entry->refs)
++);
++
++DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_add,
++
++ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
++
++ TP_ARGS(inode, ordered)
++);
++
++DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_remove,
++
++ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
++
++ TP_ARGS(inode, ordered)
++);
++
++DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_start,
++
++ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
++
++ TP_ARGS(inode, ordered)
++);
++
++DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
++
++ TP_PROTO(struct inode *inode, struct btrfs_ordered_extent *ordered),
++
++ TP_ARGS(inode, ordered)
++);
++
++DECLARE_EVENT_CLASS(btrfs__writepage,
++
++ TP_PROTO(struct page *page, struct inode *inode,
++ struct writeback_control *wbc),
++
++ TP_ARGS(page, inode, wbc),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++ __field( long, nr_to_write )
++ __field( long, pages_skipped )
++ __field( loff_t, range_start )
++ __field( loff_t, range_end )
++ __field( char, for_kupdate )
++ __field( char, for_reclaim )
++ __field( char, range_cyclic )
++ __field( pgoff_t, writeback_index )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = inode->i_ino;
++ __entry->index = page->index;
++ __entry->nr_to_write = wbc->nr_to_write;
++ __entry->pages_skipped = wbc->pages_skipped;
++ __entry->range_start = wbc->range_start;
++ __entry->range_end = wbc->range_end;
++ __entry->for_kupdate = wbc->for_kupdate;
++ __entry->for_reclaim = wbc->for_reclaim;
++ __entry->range_cyclic = wbc->range_cyclic;
++ __entry->writeback_index = inode->i_mapping->writeback_index;
++ __entry->root_objectid =
++ BTRFS_I(inode)->root->root_key.objectid;
++ ),
++
++ TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
++ "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
++ "range_end = %llu, for_kupdate = %d, "
++ "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
++ show_root_type(__entry->root_objectid),
++ (unsigned long)__entry->ino, __entry->index,
++ __entry->nr_to_write, __entry->pages_skipped,
++ __entry->range_start, __entry->range_end,
++ __entry->for_kupdate,
++ __entry->for_reclaim, __entry->range_cyclic,
++ (unsigned long)__entry->writeback_index)
++);
++
++DEFINE_EVENT(btrfs__writepage, __extent_writepage,
++
++ TP_PROTO(struct page *page, struct inode *inode,
++ struct writeback_control *wbc),
++
++ TP_ARGS(page, inode, wbc)
++);
++
++TRACE_EVENT(btrfs_writepage_end_io_hook,
++
++ TP_PROTO(struct page *page, u64 start, u64 end, int uptodate),
++
++ TP_ARGS(page, start, end, uptodate),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++ __field( u64, start )
++ __field( u64, end )
++ __field( int, uptodate )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = page->mapping->host->i_ino;
++ __entry->index = page->index;
++ __entry->start = start;
++ __entry->end = end;
++ __entry->uptodate = uptodate;
++ __entry->root_objectid =
++ BTRFS_I(page->mapping->host)->root->root_key.objectid;
++ ),
++
++ TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
++ "end = %llu, uptodate = %d",
++ show_root_type(__entry->root_objectid),
++ (unsigned long)__entry->ino, (unsigned long)__entry->index,
++ (unsigned long long)__entry->start,
++ (unsigned long long)__entry->end, __entry->uptodate)
++);
++
++TRACE_EVENT(btrfs_sync_file,
++
++ TP_PROTO(struct file *file, int datasync),
++
++ TP_ARGS(file, datasync),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( ino_t, parent )
++ __field( int, datasync )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ struct dentry *dentry = file->f_path.dentry;
++ struct inode *inode = dentry->d_inode;
++
++ __entry->ino = inode->i_ino;
++ __entry->parent = dentry->d_parent->d_inode->i_ino;
++ __entry->datasync = datasync;
++ __entry->root_objectid =
++ BTRFS_I(inode)->root->root_key.objectid;
++ ),
++
++ TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
++ show_root_type(__entry->root_objectid),
++ (unsigned long)__entry->ino, (unsigned long)__entry->parent,
++ __entry->datasync)
++);
++
++TRACE_EVENT(btrfs_sync_fs,
++
++ TP_PROTO(int wait),
++
++ TP_ARGS(wait),
++
++ TP_STRUCT__entry(
++ __field( int, wait )
++ ),
++
++ TP_fast_assign(
++ __entry->wait = wait;
++ ),
++
++ TP_printk("wait = %d", __entry->wait)
++);
++
++#define show_ref_action(action) \
++ __print_symbolic(action, \
++ { BTRFS_ADD_DELAYED_REF, "ADD_DELAYED_REF" }, \
++ { BTRFS_DROP_DELAYED_REF, "DROP_DELAYED_REF" }, \
++ { BTRFS_ADD_DELAYED_EXTENT, "ADD_DELAYED_EXTENT" }, \
++ { BTRFS_UPDATE_DELAYED_HEAD, "UPDATE_DELAYED_HEAD" })
++
++
++TRACE_EVENT(btrfs_delayed_tree_ref,
++
++ TP_PROTO(struct btrfs_delayed_ref_node *ref,
++ struct btrfs_delayed_tree_ref *full_ref,
++ int action),
++
++ TP_ARGS(ref, full_ref, action),
++
++ TP_STRUCT__entry(
++ __field( u64, bytenr )
++ __field( u64, num_bytes )
++ __field( int, action )
++ __field( u64, parent )
++ __field( u64, ref_root )
++ __field( int, level )
++ __field( int, type )
++ __field( u64, seq )
++ ),
++
++ TP_fast_assign(
++ __entry->bytenr = ref->bytenr;
++ __entry->num_bytes = ref->num_bytes;
++ __entry->action = action;
++ __entry->parent = full_ref->parent;
++ __entry->ref_root = full_ref->root;
++ __entry->level = full_ref->level;
++ __entry->type = ref->type;
++ __entry->seq = ref->seq;
++ ),
++
++ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
++ "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
++ "type = %s, seq = %llu",
++ (unsigned long long)__entry->bytenr,
++ (unsigned long long)__entry->num_bytes,
++ show_ref_action(__entry->action),
++ show_root_type(__entry->parent),
++ show_root_type(__entry->ref_root),
++ __entry->level, show_ref_type(__entry->type),
++ (unsigned long long)__entry->seq)
++);
++
++TRACE_EVENT(btrfs_delayed_data_ref,
++
++ TP_PROTO(struct btrfs_delayed_ref_node *ref,
++ struct btrfs_delayed_data_ref *full_ref,
++ int action),
++
++ TP_ARGS(ref, full_ref, action),
++
++ TP_STRUCT__entry(
++ __field( u64, bytenr )
++ __field( u64, num_bytes )
++ __field( int, action )
++ __field( u64, parent )
++ __field( u64, ref_root )
++ __field( u64, owner )
++ __field( u64, offset )
++ __field( int, type )
++ __field( u64, seq )
++ ),
++
++ TP_fast_assign(
++ __entry->bytenr = ref->bytenr;
++ __entry->num_bytes = ref->num_bytes;
++ __entry->action = action;
++ __entry->parent = full_ref->parent;
++ __entry->ref_root = full_ref->root;
++ __entry->owner = full_ref->objectid;
++ __entry->offset = full_ref->offset;
++ __entry->type = ref->type;
++ __entry->seq = ref->seq;
++ ),
++
++ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
++ "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
++ "offset = %llu, type = %s, seq = %llu",
++ (unsigned long long)__entry->bytenr,
++ (unsigned long long)__entry->num_bytes,
++ show_ref_action(__entry->action),
++ show_root_type(__entry->parent),
++ show_root_type(__entry->ref_root),
++ (unsigned long long)__entry->owner,
++ (unsigned long long)__entry->offset,
++ show_ref_type(__entry->type),
++ (unsigned long long)__entry->seq)
++);
++
++TRACE_EVENT(btrfs_delayed_ref_head,
++
++ TP_PROTO(struct btrfs_delayed_ref_node *ref,
++ struct btrfs_delayed_ref_head *head_ref,
++ int action),
++
++ TP_ARGS(ref, head_ref, action),
++
++ TP_STRUCT__entry(
++ __field( u64, bytenr )
++ __field( u64, num_bytes )
++ __field( int, action )
++ __field( int, is_data )
++ ),
++
++ TP_fast_assign(
++ __entry->bytenr = ref->bytenr;
++ __entry->num_bytes = ref->num_bytes;
++ __entry->action = action;
++ __entry->is_data = head_ref->is_data;
++ ),
++
++ TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
++ (unsigned long long)__entry->bytenr,
++ (unsigned long long)__entry->num_bytes,
++ show_ref_action(__entry->action),
++ __entry->is_data)
++);
++
++#define show_chunk_type(type) \
++ __print_flags(type, "|", \
++ { BTRFS_BLOCK_GROUP_DATA, "DATA" }, \
++ { BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
++ { BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
++ { BTRFS_BLOCK_GROUP_RAID0, "RAID0" }, \
++ { BTRFS_BLOCK_GROUP_RAID1, "RAID1" }, \
++ { BTRFS_BLOCK_GROUP_DUP, "DUP" }, \
++ { BTRFS_BLOCK_GROUP_RAID10, "RAID10"})
++
++DECLARE_EVENT_CLASS(btrfs__chunk,
++
++ TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
++ u64 offset, u64 size),
++
++ TP_ARGS(root, map, offset, size),
++
++ TP_STRUCT__entry(
++ __field( int, num_stripes )
++ __field( u64, type )
++ __field( int, sub_stripes )
++ __field( u64, offset )
++ __field( u64, size )
++ __field( u64, root_objectid )
++ ),
++
++ TP_fast_assign(
++ __entry->num_stripes = map->num_stripes;
++ __entry->type = map->type;
++ __entry->sub_stripes = map->sub_stripes;
++ __entry->offset = offset;
++ __entry->size = size;
++ __entry->root_objectid = root->root_key.objectid;
++ ),
++
++ TP_printk("root = %llu(%s), offset = %llu, size = %llu, "
++ "num_stripes = %d, sub_stripes = %d, type = %s",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->offset,
++ (unsigned long long)__entry->size,
++ __entry->num_stripes, __entry->sub_stripes,
++ show_chunk_type(__entry->type))
++);
++
++DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
++
++ TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
++ u64 offset, u64 size),
++
++ TP_ARGS(root, map, offset, size)
++);
++
++DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
++
++ TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
++ u64 offset, u64 size),
++
++ TP_ARGS(root, map, offset, size)
++);
++
++TRACE_EVENT(btrfs_cow_block,
++
++ TP_PROTO(struct btrfs_root *root, struct extent_buffer *buf,
++ struct extent_buffer *cow),
++
++ TP_ARGS(root, buf, cow),
++
++ TP_STRUCT__entry(
++ __field( u64, root_objectid )
++ __field( u64, buf_start )
++ __field( int, refs )
++ __field( u64, cow_start )
++ __field( int, buf_level )
++ __field( int, cow_level )
++ ),
++
++ TP_fast_assign(
++ __entry->root_objectid = root->root_key.objectid;
++ __entry->buf_start = buf->start;
++ __entry->refs = atomic_read(&buf->refs);
++ __entry->cow_start = cow->start;
++ __entry->buf_level = btrfs_header_level(buf);
++ __entry->cow_level = btrfs_header_level(cow);
++ ),
++
++ TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu "
++ "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
++ show_root_type(__entry->root_objectid),
++ __entry->refs,
++ (unsigned long long)__entry->buf_start,
++ __entry->buf_level,
++ (unsigned long long)__entry->cow_start,
++ __entry->cow_level)
++);
++
++TRACE_EVENT(btrfs_space_reservation,
++
++ TP_PROTO(struct btrfs_fs_info *fs_info, char *type, u64 val,
++ u64 bytes, int reserve),
++
++ TP_ARGS(fs_info, type, val, bytes, reserve),
++
++ TP_STRUCT__entry(
++ __array( u8, fsid, BTRFS_UUID_SIZE )
++ __string( type, type )
++ __field( u64, val )
++ __field( u64, bytes )
++ __field( int, reserve )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE);
++ __assign_str(type, type);
++ __entry->val = val;
++ __entry->bytes = bytes;
++ __entry->reserve = reserve;
++ ),
++
++ TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type),
++ __entry->val, __entry->reserve ? "reserve" : "release",
++ __entry->bytes)
++);
++
++DECLARE_EVENT_CLASS(btrfs__reserved_extent,
++
++ TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
++
++ TP_ARGS(root, start, len),
++
++ TP_STRUCT__entry(
++ __field( u64, root_objectid )
++ __field( u64, start )
++ __field( u64, len )
++ ),
++
++ TP_fast_assign(
++ __entry->root_objectid = root->root_key.objectid;
++ __entry->start = start;
++ __entry->len = len;
++ ),
++
++ TP_printk("root = %llu(%s), start = %llu, len = %llu",
++ show_root_type(__entry->root_objectid),
++ (unsigned long long)__entry->start,
++ (unsigned long long)__entry->len)
++);
++
++DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc,
++
++ TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
++
++ TP_ARGS(root, start, len)
++);
++
++DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
++
++ TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
++
++ TP_ARGS(root, start, len)
++);
++
++TRACE_EVENT(find_free_extent,
++
++ TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
++ u64 data),
++
++ TP_ARGS(root, num_bytes, empty_size, data),
++
++ TP_STRUCT__entry(
++ __field( u64, root_objectid )
++ __field( u64, num_bytes )
++ __field( u64, empty_size )
++ __field( u64, data )
++ ),
++
++ TP_fast_assign(
++ __entry->root_objectid = root->root_key.objectid;
++ __entry->num_bytes = num_bytes;
++ __entry->empty_size = empty_size;
++ __entry->data = data;
++ ),
++
++ TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
++ "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
++ __entry->num_bytes, __entry->empty_size, __entry->data,
++ __print_flags((unsigned long)__entry->data, "|",
++ BTRFS_GROUP_FLAGS))
++);
++
++DECLARE_EVENT_CLASS(btrfs__reserve_extent,
++
++ TP_PROTO(struct btrfs_root *root,
++ struct btrfs_block_group_cache *block_group, u64 start,
++ u64 len),
++
++ TP_ARGS(root, block_group, start, len),
++
++ TP_STRUCT__entry(
++ __field( u64, root_objectid )
++ __field( u64, bg_objectid )
++ __field( u64, flags )
++ __field( u64, start )
++ __field( u64, len )
++ ),
++
++ TP_fast_assign(
++ __entry->root_objectid = root->root_key.objectid;
++ __entry->bg_objectid = block_group->key.objectid;
++ __entry->flags = block_group->flags;
++ __entry->start = start;
++ __entry->len = len;
++ ),
++
++ TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
++ "start = %Lu, len = %Lu",
++ show_root_type(__entry->root_objectid), __entry->bg_objectid,
++ __entry->flags, __print_flags((unsigned long)__entry->flags,
++ "|", BTRFS_GROUP_FLAGS),
++ __entry->start, __entry->len)
++);
++
++DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
++
++ TP_PROTO(struct btrfs_root *root,
++ struct btrfs_block_group_cache *block_group, u64 start,
++ u64 len),
++
++ TP_ARGS(root, block_group, start, len)
++);
++
++DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
++
++ TP_PROTO(struct btrfs_root *root,
++ struct btrfs_block_group_cache *block_group, u64 start,
++ u64 len),
++
++ TP_ARGS(root, block_group, start, len)
++);
++
++TRACE_EVENT(btrfs_find_cluster,
++
++ TP_PROTO(struct btrfs_block_group_cache *block_group, u64 start,
++ u64 bytes, u64 empty_size, u64 min_bytes),
++
++ TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
++
++ TP_STRUCT__entry(
++ __field( u64, bg_objectid )
++ __field( u64, flags )
++ __field( u64, start )
++ __field( u64, bytes )
++ __field( u64, empty_size )
++ __field( u64, min_bytes )
++ ),
++
++ TP_fast_assign(
++ __entry->bg_objectid = block_group->key.objectid;
++ __entry->flags = block_group->flags;
++ __entry->start = start;
++ __entry->bytes = bytes;
++ __entry->empty_size = empty_size;
++ __entry->min_bytes = min_bytes;
++ ),
++
++ TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
++ " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
++ __entry->flags,
++ __print_flags((unsigned long)__entry->flags, "|",
++ BTRFS_GROUP_FLAGS), __entry->start,
++ __entry->bytes, __entry->empty_size, __entry->min_bytes)
++);
++
++TRACE_EVENT(btrfs_failed_cluster_setup,
++
++ TP_PROTO(struct btrfs_block_group_cache *block_group),
++
++ TP_ARGS(block_group),
++
++ TP_STRUCT__entry(
++ __field( u64, bg_objectid )
++ ),
++
++ TP_fast_assign(
++ __entry->bg_objectid = block_group->key.objectid;
++ ),
++
++ TP_printk("block_group = %Lu", __entry->bg_objectid)
++);
++
++TRACE_EVENT(btrfs_setup_cluster,
++
++ TP_PROTO(struct btrfs_block_group_cache *block_group,
++ struct btrfs_free_cluster *cluster, u64 size, int bitmap),
++
++ TP_ARGS(block_group, cluster, size, bitmap),
++
++ TP_STRUCT__entry(
++ __field( u64, bg_objectid )
++ __field( u64, flags )
++ __field( u64, start )
++ __field( u64, max_size )
++ __field( u64, size )
++ __field( int, bitmap )
++ ),
++
++ TP_fast_assign(
++ __entry->bg_objectid = block_group->key.objectid;
++ __entry->flags = block_group->flags;
++ __entry->start = cluster->window_start;
++ __entry->max_size = cluster->max_size;
++ __entry->size = size;
++ __entry->bitmap = bitmap;
++ ),
++
++ TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
++ "size = %Lu, max_size = %Lu, bitmap = %d",
++ __entry->bg_objectid,
++ __entry->flags,
++ __print_flags((unsigned long)__entry->flags, "|",
++ BTRFS_GROUP_FLAGS), __entry->start,
++ __entry->size, __entry->max_size, __entry->bitmap)
++);
++
++struct extent_state;
++TRACE_EVENT(alloc_extent_state,
++
++ TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
++
++ TP_ARGS(state, mask, IP),
++
++ TP_STRUCT__entry(
++ __field(struct extent_state *, state)
++ __field(gfp_t, mask)
++ __field(unsigned long, ip)
++ ),
++
++ TP_fast_assign(
++ __entry->state = state,
++ __entry->mask = mask,
++ __entry->ip = IP
++ ),
++
++ TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
++ show_gfp_flags(__entry->mask), (void *)__entry->ip)
++);
++
++TRACE_EVENT(free_extent_state,
++
++ TP_PROTO(struct extent_state *state, unsigned long IP),
++
++ TP_ARGS(state, IP),
++
++ TP_STRUCT__entry(
++ __field(struct extent_state *, state)
++ __field(unsigned long, ip)
++ ),
++
++ TP_fast_assign(
++ __entry->state = state,
++ __entry->ip = IP
++ ),
++
++ TP_printk(" state=%p; caller = %pF", __entry->state,
++ (void *)__entry->ip)
++);
++
++#endif /* _TRACE_BTRFS_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/compaction.h
+@@ -0,0 +1,74 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM compaction
++
++#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_COMPACTION_H
++
++#include <linux/types.h>
++#include <linux/tracepoint.h>
++#include <trace/events/gfpflags.h>
++
++DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
++
++ TP_PROTO(unsigned long nr_scanned,
++ unsigned long nr_taken),
++
++ TP_ARGS(nr_scanned, nr_taken),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, nr_scanned)
++ __field(unsigned long, nr_taken)
++ ),
++
++ TP_fast_assign(
++ __entry->nr_scanned = nr_scanned;
++ __entry->nr_taken = nr_taken;
++ ),
++
++ TP_printk("nr_scanned=%lu nr_taken=%lu",
++ __entry->nr_scanned,
++ __entry->nr_taken)
++);
++
++DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
++
++ TP_PROTO(unsigned long nr_scanned,
++ unsigned long nr_taken),
++
++ TP_ARGS(nr_scanned, nr_taken)
++);
++
++DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
++ TP_PROTO(unsigned long nr_scanned,
++ unsigned long nr_taken),
++
++ TP_ARGS(nr_scanned, nr_taken)
++);
++
++TRACE_EVENT(mm_compaction_migratepages,
++
++ TP_PROTO(unsigned long nr_migrated,
++ unsigned long nr_failed),
++
++ TP_ARGS(nr_migrated, nr_failed),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, nr_migrated)
++ __field(unsigned long, nr_failed)
++ ),
++
++ TP_fast_assign(
++ __entry->nr_migrated = nr_migrated;
++ __entry->nr_failed = nr_failed;
++ ),
++
++ TP_printk("nr_migrated=%lu nr_failed=%lu",
++ __entry->nr_migrated,
++ __entry->nr_failed)
++);
++
++
++#endif /* _TRACE_COMPACTION_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/ext3.h
+@@ -0,0 +1,864 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM ext3
++
++#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_EXT3_H
++
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(ext3_free_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( umode_t, mode )
++ __field( uid_t, uid )
++ __field( gid_t, gid )
++ __field( blkcnt_t, blocks )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->mode = inode->i_mode;
++ __entry->uid = i_uid_read(inode);
++ __entry->gid = i_gid_read(inode);
++ __entry->blocks = inode->i_blocks;
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->uid, __entry->gid,
++ (unsigned long) __entry->blocks)
++);
++
++TRACE_EVENT(ext3_request_inode,
++ TP_PROTO(struct inode *dir, int mode),
++
++ TP_ARGS(dir, mode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, dir )
++ __field( umode_t, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = dir->i_sb->s_dev;
++ __entry->dir = dir->i_ino;
++ __entry->mode = mode;
++ ),
++
++ TP_printk("dev %d,%d dir %lu mode 0%o",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->dir, __entry->mode)
++);
++
++TRACE_EVENT(ext3_allocate_inode,
++ TP_PROTO(struct inode *inode, struct inode *dir, int mode),
++
++ TP_ARGS(inode, dir, mode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ino_t, dir )
++ __field( umode_t, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->dir = dir->i_ino;
++ __entry->mode = mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->dir, __entry->mode)
++);
++
++TRACE_EVENT(ext3_evict_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, nlink )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->nlink = inode->i_nlink;
++ ),
++
++ TP_printk("dev %d,%d ino %lu nlink %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->nlink)
++);
++
++TRACE_EVENT(ext3_drop_inode,
++ TP_PROTO(struct inode *inode, int drop),
++
++ TP_ARGS(inode, drop),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, drop )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->drop = drop;
++ ),
++
++ TP_printk("dev %d,%d ino %lu drop %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->drop)
++);
++
++TRACE_EVENT(ext3_mark_inode_dirty,
++ TP_PROTO(struct inode *inode, unsigned long IP),
++
++ TP_ARGS(inode, IP),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field(unsigned long, ip )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->ip = IP;
++ ),
++
++ TP_printk("dev %d,%d ino %lu caller %pF",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, (void *)__entry->ip)
++);
++
++TRACE_EVENT(ext3_write_begin,
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int flags),
++
++ TP_ARGS(inode, pos, len, flags),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned int, len )
++ __field( unsigned int, flags )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pos = pos;
++ __entry->len = len;
++ __entry->flags = flags;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long) __entry->pos, __entry->len,
++ __entry->flags)
++);
++
++DECLARE_EVENT_CLASS(ext3__write_end,
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned int, len )
++ __field( unsigned int, copied )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pos = pos;
++ __entry->len = len;
++ __entry->copied = copied;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long) __entry->pos, __entry->len,
++ __entry->copied)
++);
++
++DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++);
++
++DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++);
++
++DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++);
++
++DECLARE_EVENT_CLASS(ext3__page_op,
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++
++ ),
++
++ TP_fast_assign(
++ __entry->index = page->index;
++ __entry->ino = page->mapping->host->i_ino;
++ __entry->dev = page->mapping->host->i_sb->s_dev;
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->index)
++);
++
++DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++);
++
++DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++);
++
++DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++);
++
++DEFINE_EVENT(ext3__page_op, ext3_readpage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++);
++
++DEFINE_EVENT(ext3__page_op, ext3_releasepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++);
++
++TRACE_EVENT(ext3_invalidatepage,
++ TP_PROTO(struct page *page, unsigned long offset),
++
++ TP_ARGS(page, offset),
++
++ TP_STRUCT__entry(
++ __field( pgoff_t, index )
++ __field( unsigned long, offset )
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++
++ ),
++
++ TP_fast_assign(
++ __entry->index = page->index;
++ __entry->offset = offset;
++ __entry->ino = page->mapping->host->i_ino;
++ __entry->dev = page->mapping->host->i_sb->s_dev;
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->index, __entry->offset)
++);
++
++TRACE_EVENT(ext3_discard_blocks,
++ TP_PROTO(struct super_block *sb, unsigned long blk,
++ unsigned long count),
++
++ TP_ARGS(sb, blk, count),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( unsigned long, blk )
++ __field( unsigned long, count )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->blk = blk;
++ __entry->count = count;
++ ),
++
++ TP_printk("dev %d,%d blk %lu count %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->blk, __entry->count)
++);
++
++TRACE_EVENT(ext3_request_blocks,
++ TP_PROTO(struct inode *inode, unsigned long goal,
++ unsigned long count),
++
++ TP_ARGS(inode, goal, count),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( unsigned long, count )
++ __field( unsigned long, goal )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->count = count;
++ __entry->goal = goal;
++ ),
++
++ TP_printk("dev %d,%d ino %lu count %lu goal %lu ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->count, __entry->goal)
++);
++
++TRACE_EVENT(ext3_allocate_blocks,
++ TP_PROTO(struct inode *inode, unsigned long goal,
++ unsigned long count, unsigned long block),
++
++ TP_ARGS(inode, goal, count, block),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( unsigned long, block )
++ __field( unsigned long, count )
++ __field( unsigned long, goal )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->block = block;
++ __entry->count = count;
++ __entry->goal = goal;
++ ),
++
++ TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->count, __entry->block,
++ __entry->goal)
++);
++
++TRACE_EVENT(ext3_free_blocks,
++ TP_PROTO(struct inode *inode, unsigned long block,
++ unsigned long count),
++
++ TP_ARGS(inode, block, count),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( umode_t, mode )
++ __field( unsigned long, block )
++ __field( unsigned long, count )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->mode = inode->i_mode;
++ __entry->block = block;
++ __entry->count = count;
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->block, __entry->count)
++);
++
++TRACE_EVENT(ext3_sync_file_enter,
++ TP_PROTO(struct file *file, int datasync),
++
++ TP_ARGS(file, datasync),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ino_t, parent )
++ __field( int, datasync )
++ ),
++
++ TP_fast_assign(
++ struct dentry *dentry = file->f_path.dentry;
++
++ __entry->dev = dentry->d_inode->i_sb->s_dev;
++ __entry->ino = dentry->d_inode->i_ino;
++ __entry->datasync = datasync;
++ __entry->parent = dentry->d_parent->d_inode->i_ino;
++ ),
++
++ TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->parent, __entry->datasync)
++);
++
++TRACE_EVENT(ext3_sync_file_exit,
++ TP_PROTO(struct inode *inode, int ret),
++
++ TP_ARGS(inode, ret),
++
++ TP_STRUCT__entry(
++ __field( int, ret )
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ __entry->ret = ret;
++ __entry->ino = inode->i_ino;
++ __entry->dev = inode->i_sb->s_dev;
++ ),
++
++ TP_printk("dev %d,%d ino %lu ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->ret)
++);
++
++TRACE_EVENT(ext3_sync_fs,
++ TP_PROTO(struct super_block *sb, int wait),
++
++ TP_ARGS(sb, wait),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, wait )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->wait = wait;
++ ),
++
++ TP_printk("dev %d,%d wait %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->wait)
++);
++
++TRACE_EVENT(ext3_rsv_window_add,
++ TP_PROTO(struct super_block *sb,
++ struct ext3_reserve_window_node *rsv_node),
++
++ TP_ARGS(sb, rsv_node),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, start )
++ __field( unsigned long, end )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->start = rsv_node->rsv_window._rsv_start;
++ __entry->end = rsv_node->rsv_window._rsv_end;
++ ),
++
++ TP_printk("dev %d,%d start %lu end %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->start, __entry->end)
++);
++
++TRACE_EVENT(ext3_discard_reservation,
++ TP_PROTO(struct inode *inode,
++ struct ext3_reserve_window_node *rsv_node),
++
++ TP_ARGS(inode, rsv_node),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, start )
++ __field( unsigned long, end )
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ __entry->start = rsv_node->rsv_window._rsv_start;
++ __entry->end = rsv_node->rsv_window._rsv_end;
++ __entry->ino = inode->i_ino;
++ __entry->dev = inode->i_sb->s_dev;
++ ),
++
++ TP_printk("dev %d,%d ino %lu start %lu end %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long)__entry->ino, __entry->start,
++ __entry->end)
++);
++
++TRACE_EVENT(ext3_alloc_new_reservation,
++ TP_PROTO(struct super_block *sb, unsigned long goal),
++
++ TP_ARGS(sb, goal),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( unsigned long, goal )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->goal = goal;
++ ),
++
++ TP_printk("dev %d,%d goal %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->goal)
++);
++
++TRACE_EVENT(ext3_reserved,
++ TP_PROTO(struct super_block *sb, unsigned long block,
++ struct ext3_reserve_window_node *rsv_node),
++
++ TP_ARGS(sb, block, rsv_node),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, block )
++ __field( unsigned long, start )
++ __field( unsigned long, end )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ __entry->block = block;
++ __entry->start = rsv_node->rsv_window._rsv_start;
++ __entry->end = rsv_node->rsv_window._rsv_end;
++ __entry->dev = sb->s_dev;
++ ),
++
++ TP_printk("dev %d,%d block %lu, start %lu end %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->block, __entry->start, __entry->end)
++);
++
++TRACE_EVENT(ext3_forget,
++ TP_PROTO(struct inode *inode, int is_metadata, unsigned long block),
++
++ TP_ARGS(inode, is_metadata, block),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( umode_t, mode )
++ __field( int, is_metadata )
++ __field( unsigned long, block )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->mode = inode->i_mode;
++ __entry->is_metadata = is_metadata;
++ __entry->block = block;
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->is_metadata, __entry->block)
++);
++
++TRACE_EVENT(ext3_read_block_bitmap,
++ TP_PROTO(struct super_block *sb, unsigned int group),
++
++ TP_ARGS(sb, group),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( __u32, group )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->group = group;
++ ),
++
++ TP_printk("dev %d,%d group %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->group)
++);
++
++TRACE_EVENT(ext3_direct_IO_enter,
++ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
++
++ TP_ARGS(inode, offset, len, rw),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( loff_t, pos )
++ __field( unsigned long, len )
++ __field( int, rw )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = inode->i_ino;
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->pos = offset;
++ __entry->len = len;
++ __entry->rw = rw;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long) __entry->pos, __entry->len,
++ __entry->rw)
++);
++
++TRACE_EVENT(ext3_direct_IO_exit,
++ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
++ int rw, int ret),
++
++ TP_ARGS(inode, offset, len, rw, ret),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( loff_t, pos )
++ __field( unsigned long, len )
++ __field( int, rw )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = inode->i_ino;
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->pos = offset;
++ __entry->len = len;
++ __entry->rw = rw;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long) __entry->pos, __entry->len,
++ __entry->rw, __entry->ret)
++);
++
++TRACE_EVENT(ext3_unlink_enter,
++ TP_PROTO(struct inode *parent, struct dentry *dentry),
++
++ TP_ARGS(parent, dentry),
++
++ TP_STRUCT__entry(
++ __field( ino_t, parent )
++ __field( ino_t, ino )
++ __field( loff_t, size )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ __entry->parent = parent->i_ino;
++ __entry->ino = dentry->d_inode->i_ino;
++ __entry->size = dentry->d_inode->i_size;
++ __entry->dev = dentry->d_inode->i_sb->s_dev;
++ ),
++
++ TP_printk("dev %d,%d ino %lu size %lld parent %ld",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long)__entry->size,
++ (unsigned long) __entry->parent)
++);
++
++TRACE_EVENT(ext3_unlink_exit,
++ TP_PROTO(struct dentry *dentry, int ret),
++
++ TP_ARGS(dentry, ret),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = dentry->d_inode->i_ino;
++ __entry->dev = dentry->d_inode->i_sb->s_dev;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("dev %d,%d ino %lu ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->ret)
++);
++
++DECLARE_EVENT_CLASS(ext3__truncate,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( blkcnt_t, blocks )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = inode->i_ino;
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->blocks = inode->i_blocks;
++ ),
++
++ TP_printk("dev %d,%d ino %lu blocks %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
++);
++
++DEFINE_EVENT(ext3__truncate, ext3_truncate_enter,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++);
++
++DEFINE_EVENT(ext3__truncate, ext3_truncate_exit,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++);
++
++TRACE_EVENT(ext3_get_blocks_enter,
++ TP_PROTO(struct inode *inode, unsigned long lblk,
++ unsigned long len, int create),
++
++ TP_ARGS(inode, lblk, len, create),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( unsigned long, lblk )
++ __field( unsigned long, len )
++ __field( int, create )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = inode->i_ino;
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->lblk = lblk;
++ __entry->len = len;
++ __entry->create = create;
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->len, __entry->create)
++);
++
++TRACE_EVENT(ext3_get_blocks_exit,
++ TP_PROTO(struct inode *inode, unsigned long lblk,
++ unsigned long pblk, unsigned long len, int ret),
++
++ TP_ARGS(inode, lblk, pblk, len, ret),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ __field( unsigned long, lblk )
++ __field( unsigned long, pblk )
++ __field( unsigned long, len )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = inode->i_ino;
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->lblk = lblk;
++ __entry->pblk = pblk;
++ __entry->len = len;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->pblk,
++ __entry->len, __entry->ret)
++);
++
++TRACE_EVENT(ext3_load_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( ino_t, ino )
++ __field( dev_t, dev )
++ ),
++
++ TP_fast_assign(
++ __entry->ino = inode->i_ino;
++ __entry->dev = inode->i_sb->s_dev;
++ ),
++
++ TP_printk("dev %d,%d ino %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino)
++);
++
++#endif /* _TRACE_EXT3_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/ext4.h
+@@ -0,0 +1,2061 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM ext4
++
++#if !defined(_TRACE_EXT4_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_EXT4_H
++
++#include <linux/writeback.h>
++#include <linux/tracepoint.h>
++
++struct ext4_allocation_context;
++struct ext4_allocation_request;
++struct ext4_extent;
++struct ext4_prealloc_space;
++struct ext4_inode_info;
++struct mpage_da_data;
++struct ext4_map_blocks;
++struct ext4_extent;
++
++#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
++
++TRACE_EVENT(ext4_free_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( uid_t, uid )
++ __field( gid_t, gid )
++ __field( __u64, blocks )
++ __field( __u16, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->uid = i_uid_read(inode);
++ __entry->gid = i_gid_read(inode);
++ __entry->blocks = inode->i_blocks;
++ __entry->mode = inode->i_mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->mode,
++ __entry->uid, __entry->gid, __entry->blocks)
++);
++
++TRACE_EVENT(ext4_request_inode,
++ TP_PROTO(struct inode *dir, int mode),
++
++ TP_ARGS(dir, mode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, dir )
++ __field( __u16, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = dir->i_sb->s_dev;
++ __entry->dir = dir->i_ino;
++ __entry->mode = mode;
++ ),
++
++ TP_printk("dev %d,%d dir %lu mode 0%o",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->dir, __entry->mode)
++);
++
++TRACE_EVENT(ext4_allocate_inode,
++ TP_PROTO(struct inode *inode, struct inode *dir, int mode),
++
++ TP_ARGS(inode, dir, mode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ino_t, dir )
++ __field( __u16, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->dir = dir->i_ino;
++ __entry->mode = mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->dir, __entry->mode)
++);
++
++TRACE_EVENT(ext4_evict_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, nlink )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->nlink = inode->i_nlink;
++ ),
++
++ TP_printk("dev %d,%d ino %lu nlink %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->nlink)
++);
++
++TRACE_EVENT(ext4_drop_inode,
++ TP_PROTO(struct inode *inode, int drop),
++
++ TP_ARGS(inode, drop),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, drop )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->drop = drop;
++ ),
++
++ TP_printk("dev %d,%d ino %lu drop %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->drop)
++);
++
++TRACE_EVENT(ext4_mark_inode_dirty,
++ TP_PROTO(struct inode *inode, unsigned long IP),
++
++ TP_ARGS(inode, IP),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field(unsigned long, ip )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->ip = IP;
++ ),
++
++ TP_printk("dev %d,%d ino %lu caller %pF",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, (void *)__entry->ip)
++);
++
++TRACE_EVENT(ext4_begin_ordered_truncate,
++ TP_PROTO(struct inode *inode, loff_t new_size),
++
++ TP_ARGS(inode, new_size),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, new_size )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->new_size = new_size;
++ ),
++
++ TP_printk("dev %d,%d ino %lu new_size %lld",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->new_size)
++);
++
++DECLARE_EVENT_CLASS(ext4__write_begin,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int flags),
++
++ TP_ARGS(inode, pos, len, flags),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned int, len )
++ __field( unsigned int, flags )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pos = pos;
++ __entry->len = len;
++ __entry->flags = flags;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pos, __entry->len, __entry->flags)
++);
++
++DEFINE_EVENT(ext4__write_begin, ext4_write_begin,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int flags),
++
++ TP_ARGS(inode, pos, len, flags)
++);
++
++DEFINE_EVENT(ext4__write_begin, ext4_da_write_begin,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int flags),
++
++ TP_ARGS(inode, pos, len, flags)
++);
++
++DECLARE_EVENT_CLASS(ext4__write_end,
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned int, len )
++ __field( unsigned int, copied )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pos = pos;
++ __entry->len = len;
++ __entry->copied = copied;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pos, __entry->len, __entry->copied)
++);
++
++DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++);
++
++DEFINE_EVENT(ext4__write_end, ext4_writeback_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++);
++
++DEFINE_EVENT(ext4__write_end, ext4_journalled_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++);
++
++DEFINE_EVENT(ext4__write_end, ext4_da_write_end,
++
++ TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
++ unsigned int copied),
++
++ TP_ARGS(inode, pos, len, copied)
++);
++
++TRACE_EVENT(ext4_da_writepages,
++ TP_PROTO(struct inode *inode, struct writeback_control *wbc),
++
++ TP_ARGS(inode, wbc),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( long, nr_to_write )
++ __field( long, pages_skipped )
++ __field( loff_t, range_start )
++ __field( loff_t, range_end )
++ __field( pgoff_t, writeback_index )
++ __field( int, sync_mode )
++ __field( char, for_kupdate )
++ __field( char, range_cyclic )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->nr_to_write = wbc->nr_to_write;
++ __entry->pages_skipped = wbc->pages_skipped;
++ __entry->range_start = wbc->range_start;
++ __entry->range_end = wbc->range_end;
++ __entry->writeback_index = inode->i_mapping->writeback_index;
++ __entry->sync_mode = wbc->sync_mode;
++ __entry->for_kupdate = wbc->for_kupdate;
++ __entry->range_cyclic = wbc->range_cyclic;
++ ),
++
++ TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
++ "range_start %lld range_end %lld sync_mode %d "
++ "for_kupdate %d range_cyclic %d writeback_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->nr_to_write,
++ __entry->pages_skipped, __entry->range_start,
++ __entry->range_end, __entry->sync_mode,
++ __entry->for_kupdate, __entry->range_cyclic,
++ (unsigned long) __entry->writeback_index)
++);
++
++TRACE_EVENT(ext4_da_write_pages,
++ TP_PROTO(struct inode *inode, struct mpage_da_data *mpd),
++
++ TP_ARGS(inode, mpd),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, b_blocknr )
++ __field( __u32, b_size )
++ __field( __u32, b_state )
++ __field( unsigned long, first_page )
++ __field( int, io_done )
++ __field( int, pages_written )
++ __field( int, sync_mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->b_blocknr = mpd->b_blocknr;
++ __entry->b_size = mpd->b_size;
++ __entry->b_state = mpd->b_state;
++ __entry->first_page = mpd->first_page;
++ __entry->io_done = mpd->io_done;
++ __entry->pages_written = mpd->pages_written;
++ __entry->sync_mode = mpd->wbc->sync_mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
++ "first_page %lu io_done %d pages_written %d sync_mode %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->b_blocknr, __entry->b_size,
++ __entry->b_state, __entry->first_page,
++ __entry->io_done, __entry->pages_written,
++ __entry->sync_mode
++ )
++);
++
++TRACE_EVENT(ext4_da_writepages_result,
++ TP_PROTO(struct inode *inode, struct writeback_control *wbc,
++ int ret, int pages_written),
++
++ TP_ARGS(inode, wbc, ret, pages_written),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, ret )
++ __field( int, pages_written )
++ __field( long, pages_skipped )
++ __field( pgoff_t, writeback_index )
++ __field( int, sync_mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->ret = ret;
++ __entry->pages_written = pages_written;
++ __entry->pages_skipped = wbc->pages_skipped;
++ __entry->writeback_index = inode->i_mapping->writeback_index;
++ __entry->sync_mode = wbc->sync_mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
++ "sync_mode %d writeback_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->ret,
++ __entry->pages_written, __entry->pages_skipped,
++ __entry->sync_mode,
++ (unsigned long) __entry->writeback_index)
++);
++
++DECLARE_EVENT_CLASS(ext4__page_op,
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = page->mapping->host->i_sb->s_dev;
++ __entry->ino = page->mapping->host->i_ino;
++ __entry->index = page->index;
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->index)
++);
++
++DEFINE_EVENT(ext4__page_op, ext4_writepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++);
++
++DEFINE_EVENT(ext4__page_op, ext4_readpage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++);
++
++DEFINE_EVENT(ext4__page_op, ext4_releasepage,
++
++ TP_PROTO(struct page *page),
++
++ TP_ARGS(page)
++);
++
++TRACE_EVENT(ext4_invalidatepage,
++ TP_PROTO(struct page *page, unsigned long offset),
++
++ TP_ARGS(page, offset),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( pgoff_t, index )
++ __field( unsigned long, offset )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = page->mapping->host->i_sb->s_dev;
++ __entry->ino = page->mapping->host->i_ino;
++ __entry->index = page->index;
++ __entry->offset = offset;
++ ),
++
++ TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->index, __entry->offset)
++);
++
++TRACE_EVENT(ext4_discard_blocks,
++ TP_PROTO(struct super_block *sb, unsigned long long blk,
++ unsigned long long count),
++
++ TP_ARGS(sb, blk, count),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( __u64, blk )
++ __field( __u64, count )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->blk = blk;
++ __entry->count = count;
++ ),
++
++ TP_printk("dev %d,%d blk %llu count %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->blk, __entry->count)
++);
++
++DECLARE_EVENT_CLASS(ext4__mb_new_pa,
++ TP_PROTO(struct ext4_allocation_context *ac,
++ struct ext4_prealloc_space *pa),
++
++ TP_ARGS(ac, pa),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, pa_pstart )
++ __field( __u64, pa_lstart )
++ __field( __u32, pa_len )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = ac->ac_sb->s_dev;
++ __entry->ino = ac->ac_inode->i_ino;
++ __entry->pa_pstart = pa->pa_pstart;
++ __entry->pa_lstart = pa->pa_lstart;
++ __entry->pa_len = pa->pa_len;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
++);
++
++DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa,
++
++ TP_PROTO(struct ext4_allocation_context *ac,
++ struct ext4_prealloc_space *pa),
++
++ TP_ARGS(ac, pa)
++);
++
++DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
++
++ TP_PROTO(struct ext4_allocation_context *ac,
++ struct ext4_prealloc_space *pa),
++
++ TP_ARGS(ac, pa)
++);
++
++TRACE_EVENT(ext4_mb_release_inode_pa,
++ TP_PROTO(struct ext4_prealloc_space *pa,
++ unsigned long long block, unsigned int count),
++
++ TP_ARGS(pa, block, count),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, block )
++ __field( __u32, count )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = pa->pa_inode->i_sb->s_dev;
++ __entry->ino = pa->pa_inode->i_ino;
++ __entry->block = block;
++ __entry->count = count;
++ ),
++
++ TP_printk("dev %d,%d ino %lu block %llu count %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->block, __entry->count)
++);
++
++TRACE_EVENT(ext4_mb_release_group_pa,
++ TP_PROTO(struct super_block *sb, struct ext4_prealloc_space *pa),
++
++ TP_ARGS(sb, pa),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( __u64, pa_pstart )
++ __field( __u32, pa_len )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->pa_pstart = pa->pa_pstart;
++ __entry->pa_len = pa->pa_len;
++ ),
++
++ TP_printk("dev %d,%d pstart %llu len %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->pa_pstart, __entry->pa_len)
++);
++
++TRACE_EVENT(ext4_discard_preallocations,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ ),
++
++ TP_printk("dev %d,%d ino %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino)
++);
++
++TRACE_EVENT(ext4_mb_discard_preallocations,
++ TP_PROTO(struct super_block *sb, int needed),
++
++ TP_ARGS(sb, needed),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, needed )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->needed = needed;
++ ),
++
++ TP_printk("dev %d,%d needed %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->needed)
++);
++
++TRACE_EVENT(ext4_request_blocks,
++ TP_PROTO(struct ext4_allocation_request *ar),
++
++ TP_ARGS(ar),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( unsigned int, len )
++ __field( __u32, logical )
++ __field( __u32, lleft )
++ __field( __u32, lright )
++ __field( __u64, goal )
++ __field( __u64, pleft )
++ __field( __u64, pright )
++ __field( unsigned int, flags )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = ar->inode->i_sb->s_dev;
++ __entry->ino = ar->inode->i_ino;
++ __entry->len = ar->len;
++ __entry->logical = ar->logical;
++ __entry->goal = ar->goal;
++ __entry->lleft = ar->lleft;
++ __entry->lright = ar->lright;
++ __entry->pleft = ar->pleft;
++ __entry->pright = ar->pright;
++ __entry->flags = ar->flags;
++ ),
++
++ TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu "
++ "lleft %u lright %u pleft %llu pright %llu ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->flags,
++ __entry->len, __entry->logical, __entry->goal,
++ __entry->lleft, __entry->lright, __entry->pleft,
++ __entry->pright)
++);
++
++TRACE_EVENT(ext4_allocate_blocks,
++ TP_PROTO(struct ext4_allocation_request *ar, unsigned long long block),
++
++ TP_ARGS(ar, block),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, block )
++ __field( unsigned int, len )
++ __field( __u32, logical )
++ __field( __u32, lleft )
++ __field( __u32, lright )
++ __field( __u64, goal )
++ __field( __u64, pleft )
++ __field( __u64, pright )
++ __field( unsigned int, flags )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = ar->inode->i_sb->s_dev;
++ __entry->ino = ar->inode->i_ino;
++ __entry->block = block;
++ __entry->len = ar->len;
++ __entry->logical = ar->logical;
++ __entry->goal = ar->goal;
++ __entry->lleft = ar->lleft;
++ __entry->lright = ar->lright;
++ __entry->pleft = ar->pleft;
++ __entry->pright = ar->pright;
++ __entry->flags = ar->flags;
++ ),
++
++ TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u "
++ "goal %llu lleft %u lright %u pleft %llu pright %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->flags,
++ __entry->len, __entry->block, __entry->logical,
++ __entry->goal, __entry->lleft, __entry->lright,
++ __entry->pleft, __entry->pright)
++);
++
++TRACE_EVENT(ext4_free_blocks,
++ TP_PROTO(struct inode *inode, __u64 block, unsigned long count,
++ int flags),
++
++ TP_ARGS(inode, block, count, flags),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, block )
++ __field( unsigned long, count )
++ __field( int, flags )
++ __field( __u16, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->block = block;
++ __entry->count = count;
++ __entry->flags = flags;
++ __entry->mode = inode->i_mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->block, __entry->count,
++ __entry->flags)
++);
++
++TRACE_EVENT(ext4_sync_file_enter,
++ TP_PROTO(struct file *file, int datasync),
++
++ TP_ARGS(file, datasync),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ino_t, parent )
++ __field( int, datasync )
++ ),
++
++ TP_fast_assign(
++ struct dentry *dentry = file->f_path.dentry;
++
++ __entry->dev = dentry->d_inode->i_sb->s_dev;
++ __entry->ino = dentry->d_inode->i_ino;
++ __entry->datasync = datasync;
++ __entry->parent = dentry->d_parent->d_inode->i_ino;
++ ),
++
++ TP_printk("dev %d,%d ino %lu parent %lu datasync %d ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long) __entry->parent, __entry->datasync)
++);
++
++TRACE_EVENT(ext4_sync_file_exit,
++ TP_PROTO(struct inode *inode, int ret),
++
++ TP_ARGS(inode, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("dev %d,%d ino %lu ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->ret)
++);
++
++TRACE_EVENT(ext4_sync_fs,
++ TP_PROTO(struct super_block *sb, int wait),
++
++ TP_ARGS(sb, wait),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, wait )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->wait = wait;
++ ),
++
++ TP_printk("dev %d,%d wait %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->wait)
++);
++
++TRACE_EVENT(ext4_alloc_da_blocks,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( unsigned int, data_blocks )
++ __field( unsigned int, meta_blocks )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
++ __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
++ ),
++
++ TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->data_blocks, __entry->meta_blocks)
++);
++
++TRACE_EVENT(ext4_mballoc_alloc,
++ TP_PROTO(struct ext4_allocation_context *ac),
++
++ TP_ARGS(ac),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u32, orig_logical )
++ __field( int, orig_start )
++ __field( __u32, orig_group )
++ __field( int, orig_len )
++ __field( __u32, goal_logical )
++ __field( int, goal_start )
++ __field( __u32, goal_group )
++ __field( int, goal_len )
++ __field( __u32, result_logical )
++ __field( int, result_start )
++ __field( __u32, result_group )
++ __field( int, result_len )
++ __field( __u16, found )
++ __field( __u16, groups )
++ __field( __u16, buddy )
++ __field( __u16, flags )
++ __field( __u16, tail )
++ __field( __u8, cr )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = ac->ac_inode->i_sb->s_dev;
++ __entry->ino = ac->ac_inode->i_ino;
++ __entry->orig_logical = ac->ac_o_ex.fe_logical;
++ __entry->orig_start = ac->ac_o_ex.fe_start;
++ __entry->orig_group = ac->ac_o_ex.fe_group;
++ __entry->orig_len = ac->ac_o_ex.fe_len;
++ __entry->goal_logical = ac->ac_g_ex.fe_logical;
++ __entry->goal_start = ac->ac_g_ex.fe_start;
++ __entry->goal_group = ac->ac_g_ex.fe_group;
++ __entry->goal_len = ac->ac_g_ex.fe_len;
++ __entry->result_logical = ac->ac_f_ex.fe_logical;
++ __entry->result_start = ac->ac_f_ex.fe_start;
++ __entry->result_group = ac->ac_f_ex.fe_group;
++ __entry->result_len = ac->ac_f_ex.fe_len;
++ __entry->found = ac->ac_found;
++ __entry->flags = ac->ac_flags;
++ __entry->groups = ac->ac_groups_scanned;
++ __entry->buddy = ac->ac_buddy;
++ __entry->tail = ac->ac_tail;
++ __entry->cr = ac->ac_criteria;
++ ),
++
++ TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
++ "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
++ "tail %u broken %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->orig_group, __entry->orig_start,
++ __entry->orig_len, __entry->orig_logical,
++ __entry->goal_group, __entry->goal_start,
++ __entry->goal_len, __entry->goal_logical,
++ __entry->result_group, __entry->result_start,
++ __entry->result_len, __entry->result_logical,
++ __entry->found, __entry->groups, __entry->cr,
++ __entry->flags, __entry->tail,
++ __entry->buddy ? 1 << __entry->buddy : 0)
++);
++
++TRACE_EVENT(ext4_mballoc_prealloc,
++ TP_PROTO(struct ext4_allocation_context *ac),
++
++ TP_ARGS(ac),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u32, orig_logical )
++ __field( int, orig_start )
++ __field( __u32, orig_group )
++ __field( int, orig_len )
++ __field( __u32, result_logical )
++ __field( int, result_start )
++ __field( __u32, result_group )
++ __field( int, result_len )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = ac->ac_inode->i_sb->s_dev;
++ __entry->ino = ac->ac_inode->i_ino;
++ __entry->orig_logical = ac->ac_o_ex.fe_logical;
++ __entry->orig_start = ac->ac_o_ex.fe_start;
++ __entry->orig_group = ac->ac_o_ex.fe_group;
++ __entry->orig_len = ac->ac_o_ex.fe_len;
++ __entry->result_logical = ac->ac_b_ex.fe_logical;
++ __entry->result_start = ac->ac_b_ex.fe_start;
++ __entry->result_group = ac->ac_b_ex.fe_group;
++ __entry->result_len = ac->ac_b_ex.fe_len;
++ ),
++
++ TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->orig_group, __entry->orig_start,
++ __entry->orig_len, __entry->orig_logical,
++ __entry->result_group, __entry->result_start,
++ __entry->result_len, __entry->result_logical)
++);
++
++DECLARE_EVENT_CLASS(ext4__mballoc,
++ TP_PROTO(struct super_block *sb,
++ struct inode *inode,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, inode, group, start, len),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, result_start )
++ __field( __u32, result_group )
++ __field( int, result_len )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->ino = inode ? inode->i_ino : 0;
++ __entry->result_start = start;
++ __entry->result_group = group;
++ __entry->result_len = len;
++ ),
++
++ TP_printk("dev %d,%d inode %lu extent %u/%d/%d ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->result_group, __entry->result_start,
++ __entry->result_len)
++);
++
++DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard,
++
++ TP_PROTO(struct super_block *sb,
++ struct inode *inode,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, inode, group, start, len)
++);
++
++DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free,
++
++ TP_PROTO(struct super_block *sb,
++ struct inode *inode,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, inode, group, start, len)
++);
++
++TRACE_EVENT(ext4_forget,
++ TP_PROTO(struct inode *inode, int is_metadata, __u64 block),
++
++ TP_ARGS(inode, is_metadata, block),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, block )
++ __field( int, is_metadata )
++ __field( __u16, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->block = block;
++ __entry->is_metadata = is_metadata;
++ __entry->mode = inode->i_mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->is_metadata, __entry->block)
++);
++
++TRACE_EVENT(ext4_da_update_reserve_space,
++ TP_PROTO(struct inode *inode, int used_blocks, int quota_claim),
++
++ TP_ARGS(inode, used_blocks, quota_claim),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, i_blocks )
++ __field( int, used_blocks )
++ __field( int, reserved_data_blocks )
++ __field( int, reserved_meta_blocks )
++ __field( int, allocated_meta_blocks )
++ __field( int, quota_claim )
++ __field( __u16, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->i_blocks = inode->i_blocks;
++ __entry->used_blocks = used_blocks;
++ __entry->reserved_data_blocks =
++ EXT4_I(inode)->i_reserved_data_blocks;
++ __entry->reserved_meta_blocks =
++ EXT4_I(inode)->i_reserved_meta_blocks;
++ __entry->allocated_meta_blocks =
++ EXT4_I(inode)->i_allocated_meta_blocks;
++ __entry->quota_claim = quota_claim;
++ __entry->mode = inode->i_mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
++ "reserved_data_blocks %d reserved_meta_blocks %d "
++ "allocated_meta_blocks %d quota_claim %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->i_blocks,
++ __entry->used_blocks, __entry->reserved_data_blocks,
++ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks,
++ __entry->quota_claim)
++);
++
++TRACE_EVENT(ext4_da_reserve_space,
++ TP_PROTO(struct inode *inode, int md_needed),
++
++ TP_ARGS(inode, md_needed),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, i_blocks )
++ __field( int, md_needed )
++ __field( int, reserved_data_blocks )
++ __field( int, reserved_meta_blocks )
++ __field( __u16, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->i_blocks = inode->i_blocks;
++ __entry->md_needed = md_needed;
++ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
++ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
++ __entry->mode = inode->i_mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d "
++ "reserved_data_blocks %d reserved_meta_blocks %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->i_blocks,
++ __entry->md_needed, __entry->reserved_data_blocks,
++ __entry->reserved_meta_blocks)
++);
++
++TRACE_EVENT(ext4_da_release_space,
++ TP_PROTO(struct inode *inode, int freed_blocks),
++
++ TP_ARGS(inode, freed_blocks),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, i_blocks )
++ __field( int, freed_blocks )
++ __field( int, reserved_data_blocks )
++ __field( int, reserved_meta_blocks )
++ __field( int, allocated_meta_blocks )
++ __field( __u16, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->i_blocks = inode->i_blocks;
++ __entry->freed_blocks = freed_blocks;
++ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
++ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
++ __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
++ __entry->mode = inode->i_mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d "
++ "reserved_data_blocks %d reserved_meta_blocks %d "
++ "allocated_meta_blocks %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->mode, __entry->i_blocks,
++ __entry->freed_blocks, __entry->reserved_data_blocks,
++ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
++);
++
++DECLARE_EVENT_CLASS(ext4__bitmap_load,
++ TP_PROTO(struct super_block *sb, unsigned long group),
++
++ TP_ARGS(sb, group),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( __u32, group )
++
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->group = group;
++ ),
++
++ TP_printk("dev %d,%d group %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->group)
++);
++
++DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load,
++
++ TP_PROTO(struct super_block *sb, unsigned long group),
++
++ TP_ARGS(sb, group)
++);
++
++DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
++
++ TP_PROTO(struct super_block *sb, unsigned long group),
++
++ TP_ARGS(sb, group)
++);
++
++DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load,
++
++ TP_PROTO(struct super_block *sb, unsigned long group),
++
++ TP_ARGS(sb, group)
++);
++
++DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
++
++ TP_PROTO(struct super_block *sb, unsigned long group),
++
++ TP_ARGS(sb, group)
++);
++
++TRACE_EVENT(ext4_direct_IO_enter,
++ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
++
++ TP_ARGS(inode, offset, len, rw),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned long, len )
++ __field( int, rw )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pos = offset;
++ __entry->len = len;
++ __entry->rw = rw;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pos, __entry->len, __entry->rw)
++);
++
++TRACE_EVENT(ext4_direct_IO_exit,
++ TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
++ int rw, int ret),
++
++ TP_ARGS(inode, offset, len, rw, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned long, len )
++ __field( int, rw )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pos = offset;
++ __entry->len = len;
++ __entry->rw = rw;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pos, __entry->len,
++ __entry->rw, __entry->ret)
++);
++
++TRACE_EVENT(ext4_fallocate_enter,
++ TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
++
++ TP_ARGS(inode, offset, len, mode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( loff_t, len )
++ __field( int, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pos = offset;
++ __entry->len = len;
++ __entry->mode = mode;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->pos,
++ __entry->len, __entry->mode)
++);
++
++TRACE_EVENT(ext4_fallocate_exit,
++ TP_PROTO(struct inode *inode, loff_t offset,
++ unsigned int max_blocks, int ret),
++
++ TP_ARGS(inode, offset, max_blocks, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( loff_t, pos )
++ __field( unsigned int, blocks )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pos = offset;
++ __entry->blocks = max_blocks;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->pos, __entry->blocks,
++ __entry->ret)
++);
++
++TRACE_EVENT(ext4_unlink_enter,
++ TP_PROTO(struct inode *parent, struct dentry *dentry),
++
++ TP_ARGS(parent, dentry),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ino_t, parent )
++ __field( loff_t, size )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = dentry->d_inode->i_sb->s_dev;
++ __entry->ino = dentry->d_inode->i_ino;
++ __entry->parent = parent->i_ino;
++ __entry->size = dentry->d_inode->i_size;
++ ),
++
++ TP_printk("dev %d,%d ino %lu size %lld parent %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->size,
++ (unsigned long) __entry->parent)
++);
++
++TRACE_EVENT(ext4_unlink_exit,
++ TP_PROTO(struct dentry *dentry, int ret),
++
++ TP_ARGS(dentry, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = dentry->d_inode->i_sb->s_dev;
++ __entry->ino = dentry->d_inode->i_ino;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("dev %d,%d ino %lu ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->ret)
++);
++
++DECLARE_EVENT_CLASS(ext4__truncate,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( __u64, blocks )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->blocks = inode->i_blocks;
++ ),
++
++ TP_printk("dev %d,%d ino %lu blocks %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino, __entry->blocks)
++);
++
++DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++);
++
++DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
++
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode)
++);
++
++/* 'ux' is the uninitialized extent. */
++TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
++ struct ext4_extent *ux),
++
++ TP_ARGS(inode, map, ux),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, m_lblk )
++ __field( unsigned, m_len )
++ __field( ext4_lblk_t, u_lblk )
++ __field( unsigned, u_len )
++ __field( ext4_fsblk_t, u_pblk )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->m_lblk = map->m_lblk;
++ __entry->m_len = map->m_len;
++ __entry->u_lblk = le32_to_cpu(ux->ee_block);
++ __entry->u_len = ext4_ext_get_actual_len(ux);
++ __entry->u_pblk = ext4_ext_pblock(ux);
++ ),
++
++ TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u u_lblk %u u_len %u "
++ "u_pblk %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->m_lblk, __entry->m_len,
++ __entry->u_lblk, __entry->u_len, __entry->u_pblk)
++);
++
++/*
++ * 'ux' is the uninitialized extent.
++ * 'ix' is the initialized extent to which blocks are transferred.
++ */
++TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
++ struct ext4_extent *ux, struct ext4_extent *ix),
++
++ TP_ARGS(inode, map, ux, ix),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, m_lblk )
++ __field( unsigned, m_len )
++ __field( ext4_lblk_t, u_lblk )
++ __field( unsigned, u_len )
++ __field( ext4_fsblk_t, u_pblk )
++ __field( ext4_lblk_t, i_lblk )
++ __field( unsigned, i_len )
++ __field( ext4_fsblk_t, i_pblk )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->m_lblk = map->m_lblk;
++ __entry->m_len = map->m_len;
++ __entry->u_lblk = le32_to_cpu(ux->ee_block);
++ __entry->u_len = ext4_ext_get_actual_len(ux);
++ __entry->u_pblk = ext4_ext_pblock(ux);
++ __entry->i_lblk = le32_to_cpu(ix->ee_block);
++ __entry->i_len = ext4_ext_get_actual_len(ix);
++ __entry->i_pblk = ext4_ext_pblock(ix);
++ ),
++
++ TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u "
++ "u_lblk %u u_len %u u_pblk %llu "
++ "i_lblk %u i_len %u i_pblk %llu ",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->m_lblk, __entry->m_len,
++ __entry->u_lblk, __entry->u_len, __entry->u_pblk,
++ __entry->i_lblk, __entry->i_len, __entry->i_pblk)
++);
++
++DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ unsigned int len, unsigned int flags),
++
++ TP_ARGS(inode, lblk, len, flags),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned int, len )
++ __field( unsigned int, flags )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->lblk = lblk;
++ __entry->len = len;
++ __entry->flags = flags;
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->len, __entry->flags)
++);
++
++DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ unsigned len, unsigned flags),
++
++ TP_ARGS(inode, lblk, len, flags)
++);
++
++DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ unsigned len, unsigned flags),
++
++ TP_ARGS(inode, lblk, len, flags)
++);
++
++DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ ext4_fsblk_t pblk, unsigned int len, int ret),
++
++ TP_ARGS(inode, lblk, pblk, len, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, pblk )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned int, len )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pblk = pblk;
++ __entry->lblk = lblk;
++ __entry->len = len;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->pblk,
++ __entry->len, __entry->ret)
++);
++
++DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ ext4_fsblk_t pblk, unsigned len, int ret),
++
++ TP_ARGS(inode, lblk, pblk, len, ret)
++);
++
++DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
++ ext4_fsblk_t pblk, unsigned len, int ret),
++
++ TP_ARGS(inode, lblk, pblk, len, ret)
++);
++
++TRACE_EVENT(ext4_ext_load_extent,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk),
++
++ TP_ARGS(inode, lblk, pblk),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, pblk )
++ __field( ext4_lblk_t, lblk )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pblk = pblk;
++ __entry->lblk = lblk;
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ __entry->lblk, __entry->pblk)
++);
++
++TRACE_EVENT(ext4_load_inode,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ ),
++
++ TP_printk("dev %d,%d ino %ld",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino)
++);
++
++TRACE_EVENT(ext4_journal_start,
++ TP_PROTO(struct super_block *sb, int nblocks, unsigned long IP),
++
++ TP_ARGS(sb, nblocks, IP),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field(unsigned long, ip )
++ __field( int, nblocks )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->ip = IP;
++ __entry->nblocks = nblocks;
++ ),
++
++ TP_printk("dev %d,%d nblocks %d caller %pF",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->nblocks, (void *)__entry->ip)
++);
++
++DECLARE_EVENT_CLASS(ext4__trim,
++ TP_PROTO(struct super_block *sb,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, group, start, len),
++
++ TP_STRUCT__entry(
++ __field( int, dev_major )
++ __field( int, dev_minor )
++ __field( __u32, group )
++ __field( int, start )
++ __field( int, len )
++ ),
++
++ TP_fast_assign(
++ __entry->dev_major = MAJOR(sb->s_dev);
++ __entry->dev_minor = MINOR(sb->s_dev);
++ __entry->group = group;
++ __entry->start = start;
++ __entry->len = len;
++ ),
++
++ TP_printk("dev %d,%d group %u, start %d, len %d",
++ __entry->dev_major, __entry->dev_minor,
++ __entry->group, __entry->start, __entry->len)
++);
++
++DEFINE_EVENT(ext4__trim, ext4_trim_extent,
++
++ TP_PROTO(struct super_block *sb,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, group, start, len)
++);
++
++DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
++
++ TP_PROTO(struct super_block *sb,
++ ext4_group_t group,
++ ext4_grpblk_t start,
++ ext4_grpblk_t len),
++
++ TP_ARGS(sb, group, start, len)
++);
++
++TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
++ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
++ unsigned int allocated, ext4_fsblk_t newblock),
++
++ TP_ARGS(inode, map, allocated, newblock),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( int, flags )
++ __field( ext4_lblk_t, lblk )
++ __field( ext4_fsblk_t, pblk )
++ __field( unsigned int, len )
++ __field( unsigned int, allocated )
++ __field( ext4_fsblk_t, newblk )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->flags = map->m_flags;
++ __entry->lblk = map->m_lblk;
++ __entry->pblk = map->m_pblk;
++ __entry->len = map->m_len;
++ __entry->allocated = allocated;
++ __entry->newblk = newblock;
++ ),
++
++ TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d"
++ "allocated %d newblock %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
++ __entry->len, __entry->flags,
++ (unsigned int) __entry->allocated,
++ (unsigned long long) __entry->newblk)
++);
++
++TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
++ TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret),
++
++ TP_ARGS(sb, map, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( unsigned int, flags )
++ __field( ext4_lblk_t, lblk )
++ __field( ext4_fsblk_t, pblk )
++ __field( unsigned int, len )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = sb->s_dev;
++ __entry->flags = map->m_flags;
++ __entry->lblk = map->m_lblk;
++ __entry->pblk = map->m_pblk;
++ __entry->len = map->m_len;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->lblk, (unsigned long long) __entry->pblk,
++ __entry->len, __entry->flags, __entry->ret)
++);
++
++TRACE_EVENT(ext4_ext_put_in_cache,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
++ ext4_fsblk_t start),
++
++ TP_ARGS(inode, lblk, len, start),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned int, len )
++ __field( ext4_fsblk_t, start )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->lblk = lblk;
++ __entry->len = len;
++ __entry->start = start;
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->lblk,
++ __entry->len,
++ (unsigned long long) __entry->start)
++);
++
++TRACE_EVENT(ext4_ext_in_cache,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
++
++ TP_ARGS(inode, lblk, ret),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->lblk = lblk;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u ret %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->lblk,
++ __entry->ret)
++
++);
++
++TRACE_EVENT(ext4_find_delalloc_range,
++ TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
++ int reverse, int found, ext4_lblk_t found_blk),
++
++ TP_ARGS(inode, from, to, reverse, found, found_blk),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, from )
++ __field( ext4_lblk_t, to )
++ __field( int, reverse )
++ __field( int, found )
++ __field( ext4_lblk_t, found_blk )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->from = from;
++ __entry->to = to;
++ __entry->reverse = reverse;
++ __entry->found = found;
++ __entry->found_blk = found_blk;
++ ),
++
++ TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
++ "(blk = %u)",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->from, (unsigned) __entry->to,
++ __entry->reverse, __entry->found,
++ (unsigned) __entry->found_blk)
++);
++
++TRACE_EVENT(ext4_get_reserved_cluster_alloc,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
++
++ TP_ARGS(inode, lblk, len),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned int, len )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->lblk = lblk;
++ __entry->len = len;
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u len %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->lblk,
++ __entry->len)
++);
++
++TRACE_EVENT(ext4_ext_show_extent,
++ TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
++ unsigned short len),
++
++ TP_ARGS(inode, lblk, pblk, len),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, pblk )
++ __field( ext4_lblk_t, lblk )
++ __field( unsigned short, len )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pblk = pblk;
++ __entry->lblk = lblk;
++ __entry->len = len;
++ ),
++
++ TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->lblk,
++ (unsigned long long) __entry->pblk,
++ (unsigned short) __entry->len)
++);
++
++TRACE_EVENT(ext4_remove_blocks,
++ TP_PROTO(struct inode *inode, struct ext4_extent *ex,
++ ext4_lblk_t from, ext4_fsblk_t to,
++ ext4_fsblk_t partial_cluster),
++
++ TP_ARGS(inode, ex, from, to, partial_cluster),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, from )
++ __field( ext4_lblk_t, to )
++ __field( ext4_fsblk_t, partial )
++ __field( ext4_fsblk_t, ee_pblk )
++ __field( ext4_lblk_t, ee_lblk )
++ __field( unsigned short, ee_len )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->from = from;
++ __entry->to = to;
++ __entry->partial = partial_cluster;
++ __entry->ee_pblk = ext4_ext_pblock(ex);
++ __entry->ee_lblk = le32_to_cpu(ex->ee_block);
++ __entry->ee_len = ext4_ext_get_actual_len(ex);
++ ),
++
++ TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u] "
++ "from %u to %u partial_cluster %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->ee_lblk,
++ (unsigned long long) __entry->ee_pblk,
++ (unsigned short) __entry->ee_len,
++ (unsigned) __entry->from,
++ (unsigned) __entry->to,
++ (unsigned) __entry->partial)
++);
++
++TRACE_EVENT(ext4_ext_rm_leaf,
++ TP_PROTO(struct inode *inode, ext4_lblk_t start,
++ struct ext4_extent *ex, ext4_fsblk_t partial_cluster),
++
++ TP_ARGS(inode, start, ex, partial_cluster),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, partial )
++ __field( ext4_lblk_t, start )
++ __field( ext4_lblk_t, ee_lblk )
++ __field( ext4_fsblk_t, ee_pblk )
++ __field( short, ee_len )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->partial = partial_cluster;
++ __entry->start = start;
++ __entry->ee_lblk = le32_to_cpu(ex->ee_block);
++ __entry->ee_pblk = ext4_ext_pblock(ex);
++ __entry->ee_len = ext4_ext_get_actual_len(ex);
++ ),
++
++ TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u] "
++ "partial_cluster %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->start,
++ (unsigned) __entry->ee_lblk,
++ (unsigned long long) __entry->ee_pblk,
++ (unsigned short) __entry->ee_len,
++ (unsigned) __entry->partial)
++);
++
++TRACE_EVENT(ext4_ext_rm_idx,
++ TP_PROTO(struct inode *inode, ext4_fsblk_t pblk),
++
++ TP_ARGS(inode, pblk),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_fsblk_t, pblk )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->pblk = pblk;
++ ),
++
++ TP_printk("dev %d,%d ino %lu index_pblk %llu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned long long) __entry->pblk)
++);
++
++TRACE_EVENT(ext4_ext_remove_space,
++ TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
++
++ TP_ARGS(inode, start, depth),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, start )
++ __field( int, depth )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->start = start;
++ __entry->depth = depth;
++ ),
++
++ TP_printk("dev %d,%d ino %lu since %u depth %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->start,
++ __entry->depth)
++);
++
++TRACE_EVENT(ext4_ext_remove_space_done,
++ TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
++ ext4_lblk_t partial, unsigned short eh_entries),
++
++ TP_ARGS(inode, start, depth, partial, eh_entries),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ __field( ext4_lblk_t, start )
++ __field( int, depth )
++ __field( ext4_lblk_t, partial )
++ __field( unsigned short, eh_entries )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ __entry->start = start;
++ __entry->depth = depth;
++ __entry->partial = partial;
++ __entry->eh_entries = eh_entries;
++ ),
++
++ TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
++ "remaining_entries %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino,
++ (unsigned) __entry->start,
++ __entry->depth,
++ (unsigned) __entry->partial,
++ (unsigned short) __entry->eh_entries)
++);
++
++#endif /* _TRACE_EXT4_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
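
Each TRACE_EVENT() in the block above expands (via trace/define_trace.h) into a trace_<name>() inline that the filesystem calls with arguments matching TP_PROTO(); TP_fast_assign() copies them into the TP_STRUCT__entry() layout, and TP_printk() only formats them when the trace buffer is read. As a hedged illustration only (not part of this patch), a call site for ext4_ext_rm_idx is assumed to look like the following, with inode and path coming from the surrounding fs/ext4/extents.c function:

    /* sketch: one argument per TP_PROTO() parameter of ext4_ext_rm_idx */
    trace_ext4_ext_rm_idx(inode, ext4_idx_pblock(path->p_idx));

When the event is disabled, the inline reduces to a disabled static branch, so the instrumented path stays cheap.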
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/gpio.h
+@@ -0,0 +1,56 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM gpio
++
++#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_GPIO_H
++
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(gpio_direction,
++
++ TP_PROTO(unsigned gpio, int in, int err),
++
++ TP_ARGS(gpio, in, err),
++
++ TP_STRUCT__entry(
++ __field(unsigned, gpio)
++ __field(int, in)
++ __field(int, err)
++ ),
++
++ TP_fast_assign(
++ __entry->gpio = gpio;
++ __entry->in = in;
++ __entry->err = err;
++ ),
++
++ TP_printk("%u %3s (%d)", __entry->gpio,
++ __entry->in ? "in" : "out", __entry->err)
++);
++
++TRACE_EVENT(gpio_value,
++
++ TP_PROTO(unsigned gpio, int get, int value),
++
++ TP_ARGS(gpio, get, value),
++
++ TP_STRUCT__entry(
++ __field(unsigned, gpio)
++ __field(int, get)
++ __field(int, value)
++ ),
++
++ TP_fast_assign(
++ __entry->gpio = gpio;
++ __entry->get = get;
++ __entry->value = value;
++ ),
++
++ TP_printk("%u %3s %d", __entry->gpio,
++ __entry->get ? "get" : "set", __entry->value)
++);
++
++#endif /* if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/irq.h
+@@ -0,0 +1,150 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM irq
++
++#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_IRQ_H
++
++#include <linux/tracepoint.h>
++
++struct irqaction;
++struct softirq_action;
++
++#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
++#define show_softirq_name(val) \
++ __print_symbolic(val, \
++ softirq_name(HI), \
++ softirq_name(TIMER), \
++ softirq_name(NET_TX), \
++ softirq_name(NET_RX), \
++ softirq_name(BLOCK), \
++ softirq_name(BLOCK_IOPOLL), \
++ softirq_name(TASKLET), \
++ softirq_name(SCHED), \
++ softirq_name(HRTIMER), \
++ softirq_name(RCU))
++
++/**
++ * irq_handler_entry - called immediately before the irq action handler
++ * @irq: irq number
++ * @action: pointer to struct irqaction
++ *
++ * The struct irqaction pointed to by @action contains various
++ * information about the handler, including the device name,
++ * @action->name, and the device id, @action->dev_id. When used in
++ * conjunction with the irq_handler_exit tracepoint, we can figure
++ * out irq handler latencies.
++ */
++TRACE_EVENT(irq_handler_entry,
++
++ TP_PROTO(int irq, struct irqaction *action),
++
++ TP_ARGS(irq, action),
++
++ TP_STRUCT__entry(
++ __field( int, irq )
++ __string( name, action->name )
++ ),
++
++ TP_fast_assign(
++ __entry->irq = irq;
++ __assign_str(name, action->name);
++ ),
++
++ TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
++);
++
++/**
++ * irq_handler_exit - called immediately after the irq action handler returns
++ * @irq: irq number
++ * @action: pointer to struct irqaction
++ * @ret: return value
++ *
++ * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
++ * @action->handler successfully handled this irq. Otherwise, the irq might be
++ * a shared irq line, or the irq was not handled successfully. Can be used in
++ * conjunction with the irq_handler_entry to understand irq handler latencies.
++ */
++TRACE_EVENT(irq_handler_exit,
++
++ TP_PROTO(int irq, struct irqaction *action, int ret),
++
++ TP_ARGS(irq, action, ret),
++
++ TP_STRUCT__entry(
++ __field( int, irq )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->irq = irq;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("irq=%d ret=%s",
++ __entry->irq, __entry->ret ? "handled" : "unhandled")
++);
++
++DECLARE_EVENT_CLASS(softirq,
++
++ TP_PROTO(unsigned int vec_nr),
++
++ TP_ARGS(vec_nr),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, vec )
++ ),
++
++ TP_fast_assign(
++ __entry->vec = vec_nr;
++ ),
++
++ TP_printk("vec=%u [action=%s]", __entry->vec,
++ show_softirq_name(__entry->vec))
++);
++
++/**
++ * softirq_entry - called immediately before the softirq handler
++ * @vec_nr: softirq vector number
++ *
++ * When used in combination with the softirq_exit tracepoint
++ * we can determine the softirq handler runtime.
++ */
++DEFINE_EVENT(softirq, softirq_entry,
++
++ TP_PROTO(unsigned int vec_nr),
++
++ TP_ARGS(vec_nr)
++);
++
++/**
++ * softirq_exit - called immediately after the softirq handler returns
++ * @vec_nr: softirq vector number
++ *
++ * When used in combination with the softirq_entry tracepoint
++ * we can determine the softirq handler runtime.
++ */
++DEFINE_EVENT(softirq, softirq_exit,
++
++ TP_PROTO(unsigned int vec_nr),
++
++ TP_ARGS(vec_nr)
++);
++
++/**
++ * softirq_raise - called immediately when a softirq is raised
++ * @vec_nr: softirq vector number
++ *
++ * When used in combination with the softirq_entry tracepoint
++ * we can determine the softirq raise to run latency.
++ */
++DEFINE_EVENT(softirq, softirq_raise,
++
++ TP_PROTO(unsigned int vec_nr),
++
++ TP_ARGS(vec_nr)
++);
++
++#endif /* _TRACE_IRQ_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
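
The kernel-doc for irq_handler_entry/irq_handler_exit above describes pairing the two events to measure handler latency. For reference, a minimal sketch of how the generic IRQ core is expected to emit that pair around the action handler (modelled on kernel/irq/handle.c; the surrounding variable names are assumptions, not part of this patch):

    /* sketch: the entry/exit events bracket the registered handler */
    trace_irq_handler_entry(irq, action);
    res = action->handler(irq, action->dev_id);
    trace_irq_handler_exit(irq, action, res);

The per-irq interval between the two events is the handler latency the comment refers to; the exit event reports "handled" only when the handler returned a non-zero value.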
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/jbd.h
+@@ -0,0 +1,194 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM jbd
++
++#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_JBD_H
++
++#include <linux/jbd.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(jbd_checkpoint,
++
++ TP_PROTO(journal_t *journal, int result),
++
++ TP_ARGS(journal, result),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, result )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->result = result;
++ ),
++
++ TP_printk("dev %d,%d result %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->result)
++);
++
++DECLARE_EVENT_CLASS(jbd_commit,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, transaction )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->transaction = commit_transaction->t_tid;
++ ),
++
++ TP_printk("dev %d,%d transaction %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction)
++);
++
++DEFINE_EVENT(jbd_commit, jbd_start_commit,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++);
++
++DEFINE_EVENT(jbd_commit, jbd_commit_locking,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++);
++
++DEFINE_EVENT(jbd_commit, jbd_commit_flushing,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++);
++
++DEFINE_EVENT(jbd_commit, jbd_commit_logging,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++);
++
++TRACE_EVENT(jbd_drop_transaction,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, transaction )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->transaction = commit_transaction->t_tid;
++ ),
++
++ TP_printk("dev %d,%d transaction %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction)
++);
++
++TRACE_EVENT(jbd_end_commit,
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, transaction )
++ __field( int, head )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->transaction = commit_transaction->t_tid;
++ __entry->head = journal->j_tail_sequence;
++ ),
++
++ TP_printk("dev %d,%d transaction %d head %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction, __entry->head)
++);
++
++TRACE_EVENT(jbd_do_submit_data,
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, transaction )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->transaction = commit_transaction->t_tid;
++ ),
++
++ TP_printk("dev %d,%d transaction %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction)
++);
++
++TRACE_EVENT(jbd_cleanup_journal_tail,
++
++ TP_PROTO(journal_t *journal, tid_t first_tid,
++ unsigned long block_nr, unsigned long freed),
++
++ TP_ARGS(journal, first_tid, block_nr, freed),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( tid_t, tail_sequence )
++ __field( tid_t, first_tid )
++ __field(unsigned long, block_nr )
++ __field(unsigned long, freed )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->tail_sequence = journal->j_tail_sequence;
++ __entry->first_tid = first_tid;
++ __entry->block_nr = block_nr;
++ __entry->freed = freed;
++ ),
++
++ TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->tail_sequence, __entry->first_tid,
++ __entry->block_nr, __entry->freed)
++);
++
++TRACE_EVENT(journal_write_superblock,
++ TP_PROTO(journal_t *journal, int write_op),
++
++ TP_ARGS(journal, write_op),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, write_op )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->write_op = write_op;
++ ),
++
++ TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
++ MINOR(__entry->dev), __entry->write_op)
++);
++
++#endif /* _TRACE_JBD_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/jbd2.h
+@@ -0,0 +1,262 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM jbd2
++
++#if !defined(_TRACE_JBD2_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_JBD2_H
++
++#include <linux/jbd2.h>
++#include <linux/tracepoint.h>
++
++struct transaction_chp_stats_s;
++struct transaction_run_stats_s;
++
++TRACE_EVENT(jbd2_checkpoint,
++
++ TP_PROTO(journal_t *journal, int result),
++
++ TP_ARGS(journal, result),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, result )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->result = result;
++ ),
++
++ TP_printk("dev %d,%d result %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->result)
++);
++
++DECLARE_EVENT_CLASS(jbd2_commit,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( char, sync_commit )
++ __field( int, transaction )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->sync_commit = commit_transaction->t_synchronous_commit;
++ __entry->transaction = commit_transaction->t_tid;
++ ),
++
++ TP_printk("dev %d,%d transaction %d sync %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction, __entry->sync_commit)
++);
++
++DEFINE_EVENT(jbd2_commit, jbd2_start_commit,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++);
++
++DEFINE_EVENT(jbd2_commit, jbd2_commit_locking,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++);
++
++DEFINE_EVENT(jbd2_commit, jbd2_commit_flushing,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++);
++
++DEFINE_EVENT(jbd2_commit, jbd2_commit_logging,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++);
++
++DEFINE_EVENT(jbd2_commit, jbd2_drop_transaction,
++
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction)
++);
++
++TRACE_EVENT(jbd2_end_commit,
++ TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
++
++ TP_ARGS(journal, commit_transaction),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( char, sync_commit )
++ __field( int, transaction )
++ __field( int, head )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->sync_commit = commit_transaction->t_synchronous_commit;
++ __entry->transaction = commit_transaction->t_tid;
++ __entry->head = journal->j_tail_sequence;
++ ),
++
++ TP_printk("dev %d,%d transaction %d sync %d head %d",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->transaction, __entry->sync_commit, __entry->head)
++);
++
++TRACE_EVENT(jbd2_submit_inode_data,
++ TP_PROTO(struct inode *inode),
++
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( ino_t, ino )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = inode->i_sb->s_dev;
++ __entry->ino = inode->i_ino;
++ ),
++
++ TP_printk("dev %d,%d ino %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ (unsigned long) __entry->ino)
++);
++
++TRACE_EVENT(jbd2_run_stats,
++ TP_PROTO(dev_t dev, unsigned long tid,
++ struct transaction_run_stats_s *stats),
++
++ TP_ARGS(dev, tid, stats),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( unsigned long, tid )
++ __field( unsigned long, wait )
++ __field( unsigned long, running )
++ __field( unsigned long, locked )
++ __field( unsigned long, flushing )
++ __field( unsigned long, logging )
++ __field( __u32, handle_count )
++ __field( __u32, blocks )
++ __field( __u32, blocks_logged )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = dev;
++ __entry->tid = tid;
++ __entry->wait = stats->rs_wait;
++ __entry->running = stats->rs_running;
++ __entry->locked = stats->rs_locked;
++ __entry->flushing = stats->rs_flushing;
++ __entry->logging = stats->rs_logging;
++ __entry->handle_count = stats->rs_handle_count;
++ __entry->blocks = stats->rs_blocks;
++ __entry->blocks_logged = stats->rs_blocks_logged;
++ ),
++
++ TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u "
++ "logging %u handle_count %u blocks %u blocks_logged %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
++ jiffies_to_msecs(__entry->wait),
++ jiffies_to_msecs(__entry->running),
++ jiffies_to_msecs(__entry->locked),
++ jiffies_to_msecs(__entry->flushing),
++ jiffies_to_msecs(__entry->logging),
++ __entry->handle_count, __entry->blocks,
++ __entry->blocks_logged)
++);
++
++TRACE_EVENT(jbd2_checkpoint_stats,
++ TP_PROTO(dev_t dev, unsigned long tid,
++ struct transaction_chp_stats_s *stats),
++
++ TP_ARGS(dev, tid, stats),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( unsigned long, tid )
++ __field( unsigned long, chp_time )
++ __field( __u32, forced_to_close )
++ __field( __u32, written )
++ __field( __u32, dropped )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = dev;
++ __entry->tid = tid;
++ __entry->chp_time = stats->cs_chp_time;
++ __entry->forced_to_close= stats->cs_forced_to_close;
++ __entry->written = stats->cs_written;
++ __entry->dropped = stats->cs_dropped;
++ ),
++
++ TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
++ "written %u dropped %u",
++ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
++ jiffies_to_msecs(__entry->chp_time),
++ __entry->forced_to_close, __entry->written, __entry->dropped)
++);
++
++TRACE_EVENT(jbd2_update_log_tail,
++
++ TP_PROTO(journal_t *journal, tid_t first_tid,
++ unsigned long block_nr, unsigned long freed),
++
++ TP_ARGS(journal, first_tid, block_nr, freed),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( tid_t, tail_sequence )
++ __field( tid_t, first_tid )
++ __field(unsigned long, block_nr )
++ __field(unsigned long, freed )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->tail_sequence = journal->j_tail_sequence;
++ __entry->first_tid = first_tid;
++ __entry->block_nr = block_nr;
++ __entry->freed = freed;
++ ),
++
++ TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->tail_sequence, __entry->first_tid,
++ __entry->block_nr, __entry->freed)
++);
++
++TRACE_EVENT(jbd2_write_superblock,
++
++ TP_PROTO(journal_t *journal, int write_op),
++
++ TP_ARGS(journal, write_op),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( int, write_op )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = journal->j_fs_dev->bd_dev;
++ __entry->write_op = write_op;
++ ),
++
++ TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
++ MINOR(__entry->dev), __entry->write_op)
++);
++
++#endif /* _TRACE_JBD2_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/kmem.h
+@@ -0,0 +1,308 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM kmem
++
++#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_KMEM_H
++
++#include <linux/types.h>
++#include <linux/tracepoint.h>
++#include <trace/events/gfpflags.h>
++
++DECLARE_EVENT_CLASS(kmem_alloc,
++
++ TP_PROTO(unsigned long call_site,
++ const void *ptr,
++ size_t bytes_req,
++ size_t bytes_alloc,
++ gfp_t gfp_flags),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, call_site )
++ __field( const void *, ptr )
++ __field( size_t, bytes_req )
++ __field( size_t, bytes_alloc )
++ __field( gfp_t, gfp_flags )
++ ),
++
++ TP_fast_assign(
++ __entry->call_site = call_site;
++ __entry->ptr = ptr;
++ __entry->bytes_req = bytes_req;
++ __entry->bytes_alloc = bytes_alloc;
++ __entry->gfp_flags = gfp_flags;
++ ),
++
++ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
++ __entry->call_site,
++ __entry->ptr,
++ __entry->bytes_req,
++ __entry->bytes_alloc,
++ show_gfp_flags(__entry->gfp_flags))
++);
++
++DEFINE_EVENT(kmem_alloc, kmalloc,
++
++ TP_PROTO(unsigned long call_site, const void *ptr,
++ size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
++);
++
++DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
++
++ TP_PROTO(unsigned long call_site, const void *ptr,
++ size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
++);
++
++DECLARE_EVENT_CLASS(kmem_alloc_node,
++
++ TP_PROTO(unsigned long call_site,
++ const void *ptr,
++ size_t bytes_req,
++ size_t bytes_alloc,
++ gfp_t gfp_flags,
++ int node),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, call_site )
++ __field( const void *, ptr )
++ __field( size_t, bytes_req )
++ __field( size_t, bytes_alloc )
++ __field( gfp_t, gfp_flags )
++ __field( int, node )
++ ),
++
++ TP_fast_assign(
++ __entry->call_site = call_site;
++ __entry->ptr = ptr;
++ __entry->bytes_req = bytes_req;
++ __entry->bytes_alloc = bytes_alloc;
++ __entry->gfp_flags = gfp_flags;
++ __entry->node = node;
++ ),
++
++ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
++ __entry->call_site,
++ __entry->ptr,
++ __entry->bytes_req,
++ __entry->bytes_alloc,
++ show_gfp_flags(__entry->gfp_flags),
++ __entry->node)
++);
++
++DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
++
++ TP_PROTO(unsigned long call_site, const void *ptr,
++ size_t bytes_req, size_t bytes_alloc,
++ gfp_t gfp_flags, int node),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
++);
++
++DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
++
++ TP_PROTO(unsigned long call_site, const void *ptr,
++ size_t bytes_req, size_t bytes_alloc,
++ gfp_t gfp_flags, int node),
++
++ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
++);
++
++DECLARE_EVENT_CLASS(kmem_free,
++
++ TP_PROTO(unsigned long call_site, const void *ptr),
++
++ TP_ARGS(call_site, ptr),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, call_site )
++ __field( const void *, ptr )
++ ),
++
++ TP_fast_assign(
++ __entry->call_site = call_site;
++ __entry->ptr = ptr;
++ ),
++
++ TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
++);
++
++DEFINE_EVENT(kmem_free, kfree,
++
++ TP_PROTO(unsigned long call_site, const void *ptr),
++
++ TP_ARGS(call_site, ptr)
++);
++
++DEFINE_EVENT(kmem_free, kmem_cache_free,
++
++ TP_PROTO(unsigned long call_site, const void *ptr),
++
++ TP_ARGS(call_site, ptr)
++);
++
++TRACE_EVENT(mm_page_free,
++
++ TP_PROTO(struct page *page, unsigned int order),
++
++ TP_ARGS(page, order),
++
++ TP_STRUCT__entry(
++ __field( struct page *, page )
++ __field( unsigned int, order )
++ ),
++
++ TP_fast_assign(
++ __entry->page = page;
++ __entry->order = order;
++ ),
++
++ TP_printk("page=%p pfn=%lu order=%d",
++ __entry->page,
++ page_to_pfn(__entry->page),
++ __entry->order)
++);
++
++TRACE_EVENT(mm_page_free_batched,
++
++ TP_PROTO(struct page *page, int cold),
++
++ TP_ARGS(page, cold),
++
++ TP_STRUCT__entry(
++ __field( struct page *, page )
++ __field( int, cold )
++ ),
++
++ TP_fast_assign(
++ __entry->page = page;
++ __entry->cold = cold;
++ ),
++
++ TP_printk("page=%p pfn=%lu order=0 cold=%d",
++ __entry->page,
++ page_to_pfn(__entry->page),
++ __entry->cold)
++);
++
++TRACE_EVENT(mm_page_alloc,
++
++ TP_PROTO(struct page *page, unsigned int order,
++ gfp_t gfp_flags, int migratetype),
++
++ TP_ARGS(page, order, gfp_flags, migratetype),
++
++ TP_STRUCT__entry(
++ __field( struct page *, page )
++ __field( unsigned int, order )
++ __field( gfp_t, gfp_flags )
++ __field( int, migratetype )
++ ),
++
++ TP_fast_assign(
++ __entry->page = page;
++ __entry->order = order;
++ __entry->gfp_flags = gfp_flags;
++ __entry->migratetype = migratetype;
++ ),
++
++ TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
++ __entry->page,
++ __entry->page ? page_to_pfn(__entry->page) : 0,
++ __entry->order,
++ __entry->migratetype,
++ show_gfp_flags(__entry->gfp_flags))
++);
++
++DECLARE_EVENT_CLASS(mm_page,
++
++ TP_PROTO(struct page *page, unsigned int order, int migratetype),
++
++ TP_ARGS(page, order, migratetype),
++
++ TP_STRUCT__entry(
++ __field( struct page *, page )
++ __field( unsigned int, order )
++ __field( int, migratetype )
++ ),
++
++ TP_fast_assign(
++ __entry->page = page;
++ __entry->order = order;
++ __entry->migratetype = migratetype;
++ ),
++
++ TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
++ __entry->page,
++ __entry->page ? page_to_pfn(__entry->page) : 0,
++ __entry->order,
++ __entry->migratetype,
++ __entry->order == 0)
++);
++
++DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
++
++ TP_PROTO(struct page *page, unsigned int order, int migratetype),
++
++ TP_ARGS(page, order, migratetype)
++);
++
++DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
++
++ TP_PROTO(struct page *page, unsigned int order, int migratetype),
++
++ TP_ARGS(page, order, migratetype),
++
++ TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
++ __entry->page, page_to_pfn(__entry->page),
++ __entry->order, __entry->migratetype)
++);
++
++TRACE_EVENT(mm_page_alloc_extfrag,
++
++ TP_PROTO(struct page *page,
++ int alloc_order, int fallback_order,
++ int alloc_migratetype, int fallback_migratetype),
++
++ TP_ARGS(page,
++ alloc_order, fallback_order,
++ alloc_migratetype, fallback_migratetype),
++
++ TP_STRUCT__entry(
++ __field( struct page *, page )
++ __field( int, alloc_order )
++ __field( int, fallback_order )
++ __field( int, alloc_migratetype )
++ __field( int, fallback_migratetype )
++ ),
++
++ TP_fast_assign(
++ __entry->page = page;
++ __entry->alloc_order = alloc_order;
++ __entry->fallback_order = fallback_order;
++ __entry->alloc_migratetype = alloc_migratetype;
++ __entry->fallback_migratetype = fallback_migratetype;
++ ),
++
++ TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
++ __entry->page,
++ page_to_pfn(__entry->page),
++ __entry->alloc_order,
++ __entry->fallback_order,
++ pageblock_order,
++ __entry->alloc_migratetype,
++ __entry->fallback_migratetype,
++ __entry->fallback_order < pageblock_order,
++ __entry->alloc_migratetype == __entry->fallback_migratetype)
++);
++
++#endif /* _TRACE_KMEM_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/kvm.h
+@@ -0,0 +1,312 @@
++#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_KVM_MAIN_H
++
++#include <linux/tracepoint.h>
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM kvm
++
++#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
++
++#define kvm_trace_exit_reason \
++ ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
++ ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
++ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
++ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
++ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
++
++TRACE_EVENT(kvm_userspace_exit,
++ TP_PROTO(__u32 reason, int errno),
++ TP_ARGS(reason, errno),
++
++ TP_STRUCT__entry(
++ __field( __u32, reason )
++ __field( int, errno )
++ ),
++
++ TP_fast_assign(
++ __entry->reason = reason;
++ __entry->errno = errno;
++ ),
++
++ TP_printk("reason %s (%d)",
++ __entry->errno < 0 ?
++ (__entry->errno == -EINTR ? "restart" : "error") :
++ __print_symbolic(__entry->reason, kvm_trace_exit_reason),
++ __entry->errno < 0 ? -__entry->errno : __entry->reason)
++);
++
++#if defined(__KVM_HAVE_IOAPIC)
++TRACE_EVENT(kvm_set_irq,
++ TP_PROTO(unsigned int gsi, int level, int irq_source_id),
++ TP_ARGS(gsi, level, irq_source_id),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, gsi )
++ __field( int, level )
++ __field( int, irq_source_id )
++ ),
++
++ TP_fast_assign(
++ __entry->gsi = gsi;
++ __entry->level = level;
++ __entry->irq_source_id = irq_source_id;
++ ),
++
++ TP_printk("gsi %u level %d source %d",
++ __entry->gsi, __entry->level, __entry->irq_source_id)
++);
++
++#define kvm_deliver_mode \
++ {0x0, "Fixed"}, \
++ {0x1, "LowPrio"}, \
++ {0x2, "SMI"}, \
++ {0x3, "Res3"}, \
++ {0x4, "NMI"}, \
++ {0x5, "INIT"}, \
++ {0x6, "SIPI"}, \
++ {0x7, "ExtINT"}
++
++TRACE_EVENT(kvm_ioapic_set_irq,
++ TP_PROTO(__u64 e, int pin, bool coalesced),
++ TP_ARGS(e, pin, coalesced),
++
++ TP_STRUCT__entry(
++ __field( __u64, e )
++ __field( int, pin )
++ __field( bool, coalesced )
++ ),
++
++ TP_fast_assign(
++ __entry->e = e;
++ __entry->pin = pin;
++ __entry->coalesced = coalesced;
++ ),
++
++ TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
++ __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
++ __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
++ (__entry->e & (1<<11)) ? "logical" : "physical",
++ (__entry->e & (1<<15)) ? "level" : "edge",
++ (__entry->e & (1<<16)) ? "|masked" : "",
++ __entry->coalesced ? " (coalesced)" : "")
++);
++
++TRACE_EVENT(kvm_msi_set_irq,
++ TP_PROTO(__u64 address, __u64 data),
++ TP_ARGS(address, data),
++
++ TP_STRUCT__entry(
++ __field( __u64, address )
++ __field( __u64, data )
++ ),
++
++ TP_fast_assign(
++ __entry->address = address;
++ __entry->data = data;
++ ),
++
++ TP_printk("dst %u vec %x (%s|%s|%s%s)",
++ (u8)(__entry->address >> 12), (u8)__entry->data,
++ __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
++ (__entry->address & (1<<2)) ? "logical" : "physical",
++ (__entry->data & (1<<15)) ? "level" : "edge",
++ (__entry->address & (1<<3)) ? "|rh" : "")
++);
++
++#define kvm_irqchips \
++ {KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
++ {KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
++ {KVM_IRQCHIP_IOAPIC, "IOAPIC"}
++
++TRACE_EVENT(kvm_ack_irq,
++ TP_PROTO(unsigned int irqchip, unsigned int pin),
++ TP_ARGS(irqchip, pin),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, irqchip )
++ __field( unsigned int, pin )
++ ),
++
++ TP_fast_assign(
++ __entry->irqchip = irqchip;
++ __entry->pin = pin;
++ ),
++
++ TP_printk("irqchip %s pin %u",
++ __print_symbolic(__entry->irqchip, kvm_irqchips),
++ __entry->pin)
++);
++
++
++
++#endif /* defined(__KVM_HAVE_IOAPIC) */
++
++#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
++#define KVM_TRACE_MMIO_READ 1
++#define KVM_TRACE_MMIO_WRITE 2
++
++#define kvm_trace_symbol_mmio \
++ { KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
++ { KVM_TRACE_MMIO_READ, "read" }, \
++ { KVM_TRACE_MMIO_WRITE, "write" }
++
++TRACE_EVENT(kvm_mmio,
++ TP_PROTO(int type, int len, u64 gpa, u64 val),
++ TP_ARGS(type, len, gpa, val),
++
++ TP_STRUCT__entry(
++ __field( u32, type )
++ __field( u32, len )
++ __field( u64, gpa )
++ __field( u64, val )
++ ),
++
++ TP_fast_assign(
++ __entry->type = type;
++ __entry->len = len;
++ __entry->gpa = gpa;
++ __entry->val = val;
++ ),
++
++ TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
++ __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
++ __entry->len, __entry->gpa, __entry->val)
++);
++
++#define kvm_fpu_load_symbol \
++ {0, "unload"}, \
++ {1, "load"}
++
++TRACE_EVENT(kvm_fpu,
++ TP_PROTO(int load),
++ TP_ARGS(load),
++
++ TP_STRUCT__entry(
++ __field( u32, load )
++ ),
++
++ TP_fast_assign(
++ __entry->load = load;
++ ),
++
++ TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
++);
++
++TRACE_EVENT(kvm_age_page,
++ TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
++ TP_ARGS(hva, slot, ref),
++
++ TP_STRUCT__entry(
++ __field( u64, hva )
++ __field( u64, gfn )
++ __field( u8, referenced )
++ ),
++
++ TP_fast_assign(
++ __entry->hva = hva;
++ __entry->gfn =
++ slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
++ __entry->referenced = ref;
++ ),
++
++ TP_printk("hva %llx gfn %llx %s",
++ __entry->hva, __entry->gfn,
++ __entry->referenced ? "YOUNG" : "OLD")
++);
++
++#ifdef CONFIG_KVM_ASYNC_PF
++DECLARE_EVENT_CLASS(kvm_async_get_page_class,
++
++ TP_PROTO(u64 gva, u64 gfn),
++
++ TP_ARGS(gva, gfn),
++
++ TP_STRUCT__entry(
++ __field(__u64, gva)
++ __field(u64, gfn)
++ ),
++
++ TP_fast_assign(
++ __entry->gva = gva;
++ __entry->gfn = gfn;
++ ),
++
++ TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
++);
++
++DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
++
++ TP_PROTO(u64 gva, u64 gfn),
++
++ TP_ARGS(gva, gfn)
++);
++
++DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
++
++ TP_PROTO(u64 gva, u64 gfn),
++
++ TP_ARGS(gva, gfn)
++);
++
++DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
++
++ TP_PROTO(u64 token, u64 gva),
++
++ TP_ARGS(token, gva),
++
++ TP_STRUCT__entry(
++ __field(__u64, token)
++ __field(__u64, gva)
++ ),
++
++ TP_fast_assign(
++ __entry->token = token;
++ __entry->gva = gva;
++ ),
++
++ TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
++
++);
++
++DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
++
++ TP_PROTO(u64 token, u64 gva),
++
++ TP_ARGS(token, gva)
++);
++
++DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
++
++ TP_PROTO(u64 token, u64 gva),
++
++ TP_ARGS(token, gva)
++);
++
++TRACE_EVENT(
++ kvm_async_pf_completed,
++ TP_PROTO(unsigned long address, struct page *page, u64 gva),
++ TP_ARGS(address, page, gva),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, address)
++ __field(pfn_t, pfn)
++ __field(u64, gva)
++ ),
++
++ TP_fast_assign(
++ __entry->address = address;
++ __entry->pfn = page ? page_to_pfn(page) : 0;
++ __entry->gva = gva;
++ ),
++
++ TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
++ __entry->address, __entry->pfn)
++);
++
++#endif
++
++#endif /* _TRACE_KVM_MAIN_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/lock.h
+@@ -0,0 +1,86 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM lock
++
++#if !defined(_TRACE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_LOCK_H
++
++#include <linux/lockdep.h>
++#include <linux/tracepoint.h>
++
++#ifdef CONFIG_LOCKDEP
++
++TRACE_EVENT(lock_acquire,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
++ int trylock, int read, int check,
++ struct lockdep_map *next_lock, unsigned long ip),
++
++ TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
++
++ TP_STRUCT__entry(
++ __field(unsigned int, flags)
++ __string(name, lock->name)
++ __field(void *, lockdep_addr)
++ ),
++
++ TP_fast_assign(
++ __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
++ __assign_str(name, lock->name);
++ __entry->lockdep_addr = lock;
++ ),
++
++ TP_printk("%p %s%s%s", __entry->lockdep_addr,
++ (__entry->flags & 1) ? "try " : "",
++ (__entry->flags & 2) ? "read " : "",
++ __get_str(name))
++);
++
++DECLARE_EVENT_CLASS(lock,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
++
++ TP_ARGS(lock, ip),
++
++ TP_STRUCT__entry(
++ __string( name, lock->name )
++ __field( void *, lockdep_addr )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, lock->name);
++ __entry->lockdep_addr = lock;
++ ),
++
++ TP_printk("%p %s", __entry->lockdep_addr, __get_str(name))
++);
++
++DEFINE_EVENT(lock, lock_release,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
++
++ TP_ARGS(lock, ip)
++);
++
++#ifdef CONFIG_LOCK_STAT
++
++DEFINE_EVENT(lock, lock_contended,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
++
++ TP_ARGS(lock, ip)
++);
++
++DEFINE_EVENT(lock, lock_acquired,
++
++ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
++
++ TP_ARGS(lock, ip)
++);
++
++#endif
++#endif
++
++#endif /* _TRACE_LOCK_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/module.h
+@@ -0,0 +1,131 @@
++/*
++ * Because linux/module.h has tracepoints in the header, and ftrace.h
++ * eventually includes this file, define_trace.h includes linux/module.h.
++ * But we do not want module.h to override the TRACE_SYSTEM macro
++ * variable that define_trace.h is processing, so we only set it
++ * when module events are being processed, which would happen when
++ * CREATE_TRACE_POINTS is defined.
++ */
++#ifdef CREATE_TRACE_POINTS
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM module
++#endif
++
++#if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_MODULE_H
++
++#include <linux/tracepoint.h>
++
++#ifdef CONFIG_MODULES
++
++struct module;
++
++#define show_module_flags(flags) __print_flags(flags, "", \
++ { (1UL << TAINT_PROPRIETARY_MODULE), "P" }, \
++ { (1UL << TAINT_FORCED_MODULE), "F" }, \
++ { (1UL << TAINT_CRAP), "C" })
++
++TRACE_EVENT(module_load,
++
++ TP_PROTO(struct module *mod),
++
++ TP_ARGS(mod),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, taints )
++ __string( name, mod->name )
++ ),
++
++ TP_fast_assign(
++ __entry->taints = mod->taints;
++ __assign_str(name, mod->name);
++ ),
++
++ TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints))
++);
++
++TRACE_EVENT(module_free,
++
++ TP_PROTO(struct module *mod),
++
++ TP_ARGS(mod),
++
++ TP_STRUCT__entry(
++ __string( name, mod->name )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, mod->name);
++ ),
++
++ TP_printk("%s", __get_str(name))
++);
++
++#ifdef CONFIG_MODULE_UNLOAD
++/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
++
++DECLARE_EVENT_CLASS(module_refcnt,
++
++ TP_PROTO(struct module *mod, unsigned long ip),
++
++ TP_ARGS(mod, ip),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, ip )
++ __field( int, refcnt )
++ __string( name, mod->name )
++ ),
++
++ TP_fast_assign(
++ __entry->ip = ip;
++ __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
++ __assign_str(name, mod->name);
++ ),
++
++ TP_printk("%s call_site=%pf refcnt=%d",
++ __get_str(name), (void *)__entry->ip, __entry->refcnt)
++);
++
++DEFINE_EVENT(module_refcnt, module_get,
++
++ TP_PROTO(struct module *mod, unsigned long ip),
++
++ TP_ARGS(mod, ip)
++);
++
++DEFINE_EVENT(module_refcnt, module_put,
++
++ TP_PROTO(struct module *mod, unsigned long ip),
++
++ TP_ARGS(mod, ip)
++);
++#endif /* CONFIG_MODULE_UNLOAD */
++
++TRACE_EVENT(module_request,
++
++ TP_PROTO(char *name, bool wait, unsigned long ip),
++
++ TP_ARGS(name, wait, ip),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, ip )
++ __field( bool, wait )
++ __string( name, name )
++ ),
++
++ TP_fast_assign(
++ __entry->ip = ip;
++ __entry->wait = wait;
++ __assign_str(name, name);
++ ),
++
++ TP_printk("%s wait=%d call_site=%pf",
++ __get_str(name), (int)__entry->wait, (void *)__entry->ip)
++);
++
++#endif /* CONFIG_MODULES */
++
++#endif /* _TRACE_MODULE_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
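
The header comment at the top of module.h explains why TRACE_SYSTEM is set only under CREATE_TRACE_POINTS. For reference, a sketch of the usual instantiation pattern in the single .c file that owns these events (kernel/module.c is assumed to follow this convention; shown as an illustration, not as part of this patch):

    /* exactly one translation unit defines the tracepoint bodies */
    #define CREATE_TRACE_POINTS
    #include <trace/events/module.h>

All other users include the header without CREATE_TRACE_POINTS and get only the trace_module_*() inline wrappers.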
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/napi.h
+@@ -0,0 +1,38 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM napi
++
++#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_NAPI_H
++
++#include <linux/netdevice.h>
++#include <linux/tracepoint.h>
++#include <linux/ftrace.h>
++
++#define NO_DEV "(no_device)"
++
++TRACE_EVENT(napi_poll,
++
++ TP_PROTO(struct napi_struct *napi),
++
++ TP_ARGS(napi),
++
++ TP_STRUCT__entry(
++ __field( struct napi_struct *, napi)
++ __string( dev_name, napi->dev ? napi->dev->name : NO_DEV)
++ ),
++
++ TP_fast_assign(
++ __entry->napi = napi;
++ __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
++ ),
++
++ TP_printk("napi poll on napi struct %p for device %s",
++ __entry->napi, __get_str(dev_name))
++);
++
++#undef NO_DEV
++
++#endif /* _TRACE_NAPI_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/net.h
+@@ -0,0 +1,84 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM net
++
++#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_NET_H
++
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/ip.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(net_dev_xmit,
++
++ TP_PROTO(struct sk_buff *skb,
++ int rc,
++ struct net_device *dev,
++ unsigned int skb_len),
++
++ TP_ARGS(skb, rc, dev, skb_len),
++
++ TP_STRUCT__entry(
++ __field( void *, skbaddr )
++ __field( unsigned int, len )
++ __field( int, rc )
++ __string( name, dev->name )
++ ),
++
++ TP_fast_assign(
++ __entry->skbaddr = skb;
++ __entry->len = skb_len;
++ __entry->rc = rc;
++ __assign_str(name, dev->name);
++ ),
++
++ TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
++ __get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
++);
++
++DECLARE_EVENT_CLASS(net_dev_template,
++
++ TP_PROTO(struct sk_buff *skb),
++
++ TP_ARGS(skb),
++
++ TP_STRUCT__entry(
++ __field( void *, skbaddr )
++ __field( unsigned int, len )
++ __string( name, skb->dev->name )
++ ),
++
++ TP_fast_assign(
++ __entry->skbaddr = skb;
++ __entry->len = skb->len;
++ __assign_str(name, skb->dev->name);
++ ),
++
++ TP_printk("dev=%s skbaddr=%p len=%u",
++ __get_str(name), __entry->skbaddr, __entry->len)
++)
++
++DEFINE_EVENT(net_dev_template, net_dev_queue,
++
++ TP_PROTO(struct sk_buff *skb),
++
++ TP_ARGS(skb)
++);
++
++DEFINE_EVENT(net_dev_template, netif_receive_skb,
++
++ TP_PROTO(struct sk_buff *skb),
++
++ TP_ARGS(skb)
++);
++
++DEFINE_EVENT(net_dev_template, netif_rx,
++
++ TP_PROTO(struct sk_buff *skb),
++
++ TP_ARGS(skb)
++);
++#endif /* _TRACE_NET_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/power.h
+@@ -0,0 +1,275 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM power
++
++#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_POWER_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++
++DECLARE_EVENT_CLASS(cpu,
++
++ TP_PROTO(unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(state, cpu_id),
++
++ TP_STRUCT__entry(
++ __field( u32, state )
++ __field( u32, cpu_id )
++ ),
++
++ TP_fast_assign(
++ __entry->state = state;
++ __entry->cpu_id = cpu_id;
++ ),
++
++ TP_printk("state=%lu cpu_id=%lu", (unsigned long)__entry->state,
++ (unsigned long)__entry->cpu_id)
++);
++
++DEFINE_EVENT(cpu, cpu_idle,
++
++ TP_PROTO(unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(state, cpu_id)
++);
++
++/* This file can get included multiple times, TRACE_HEADER_MULTI_READ at top */
++#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING
++#define _PWR_EVENT_AVOID_DOUBLE_DEFINING
++
++#define PWR_EVENT_EXIT -1
++#endif
++
++DEFINE_EVENT(cpu, cpu_frequency,
++
++ TP_PROTO(unsigned int frequency, unsigned int cpu_id),
++
++ TP_ARGS(frequency, cpu_id)
++);
++
++TRACE_EVENT(machine_suspend,
++
++ TP_PROTO(unsigned int state),
++
++ TP_ARGS(state),
++
++ TP_STRUCT__entry(
++ __field( u32, state )
++ ),
++
++ TP_fast_assign(
++ __entry->state = state;
++ ),
++
++ TP_printk("state=%lu", (unsigned long)__entry->state)
++);
++
++DECLARE_EVENT_CLASS(wakeup_source,
++
++ TP_PROTO(const char *name, unsigned int state),
++
++ TP_ARGS(name, state),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ __field( u64, state )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, name);
++ __entry->state = state;
++ ),
++
++ TP_printk("%s state=0x%lx", __get_str(name),
++ (unsigned long)__entry->state)
++);
++
++DEFINE_EVENT(wakeup_source, wakeup_source_activate,
++
++ TP_PROTO(const char *name, unsigned int state),
++
++ TP_ARGS(name, state)
++);
++
++DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
++
++ TP_PROTO(const char *name, unsigned int state),
++
++ TP_ARGS(name, state)
++);
++
++#ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED
++
++/*
++ * The power events are used for cpuidle & suspend (power_start, power_end)
++ * and for cpufreq (power_frequency)
++ */
++DECLARE_EVENT_CLASS(power,
++
++ TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(type, state, cpu_id),
++
++ TP_STRUCT__entry(
++ __field( u64, type )
++ __field( u64, state )
++ __field( u64, cpu_id )
++ ),
++
++ TP_fast_assign(
++ __entry->type = type;
++ __entry->state = state;
++ __entry->cpu_id = cpu_id;
++ ),
++
++ TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type,
++ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
++);
++
++DEFINE_EVENT(power, power_start,
++
++ TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(type, state, cpu_id)
++);
++
++DEFINE_EVENT(power, power_frequency,
++
++ TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(type, state, cpu_id)
++);
++
++TRACE_EVENT(power_end,
++
++ TP_PROTO(unsigned int cpu_id),
++
++ TP_ARGS(cpu_id),
++
++ TP_STRUCT__entry(
++ __field( u64, cpu_id )
++ ),
++
++ TP_fast_assign(
++ __entry->cpu_id = cpu_id;
++ ),
++
++ TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
++
++);
++
++/* Deprecated dummy functions must be protected against multi-declaration */
++#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
++#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
++
++enum {
++ POWER_NONE = 0,
++ POWER_CSTATE = 1,
++ POWER_PSTATE = 2,
++};
++#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
++
++#else /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
++
++#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
++#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
++enum {
++ POWER_NONE = 0,
++ POWER_CSTATE = 1,
++ POWER_PSTATE = 2,
++};
++
++/* These dummy declarations have to be ripped out when the deprecated
++ events get removed */
++static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
++static inline void trace_power_end(u64 cpuid) {};
++static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
++static inline void trace_power_end_rcuidle(u64 cpuid) {};
++static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
++#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
++
++#endif /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
++
++/*
++ * The clock events are used for clock enable/disable and for
++ * clock rate change
++ */
++DECLARE_EVENT_CLASS(clock,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ __field( u64, state )
++ __field( u64, cpu_id )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, name);
++ __entry->state = state;
++ __entry->cpu_id = cpu_id;
++ ),
++
++ TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
++ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
++);
++
++DEFINE_EVENT(clock, clock_enable,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id)
++);
++
++DEFINE_EVENT(clock, clock_disable,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id)
++);
++
++DEFINE_EVENT(clock, clock_set_rate,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id)
++);
++
++/*
++ * The power domain events are used for power domain transitions
++ */
++DECLARE_EVENT_CLASS(power_domain,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ __field( u64, state )
++ __field( u64, cpu_id )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, name);
++ __entry->state = state;
++ __entry->cpu_id = cpu_id;
++ ),
++
++ TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
++ (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
++);
++
++DEFINE_EVENT(power_domain, power_domain_target,
++
++ TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
++
++ TP_ARGS(name, state, cpu_id)
++);
++#endif /* _TRACE_POWER_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/printk.h
+@@ -0,0 +1,41 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM printk
++
++#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_PRINTK_H
++
++#include <linux/tracepoint.h>
++
++TRACE_EVENT_CONDITION(console,
++ TP_PROTO(const char *log_buf, unsigned start, unsigned end,
++ unsigned log_buf_len),
++
++ TP_ARGS(log_buf, start, end, log_buf_len),
++
++ TP_CONDITION(start != end),
++
++ TP_STRUCT__entry(
++ __dynamic_array(char, msg, end - start + 1)
++ ),
++
++ TP_fast_assign(
++ if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) {
++ memcpy(__get_dynamic_array(msg),
++ log_buf + (start & (log_buf_len - 1)),
++ log_buf_len - (start & (log_buf_len - 1)));
++ memcpy((char *)__get_dynamic_array(msg) +
++ log_buf_len - (start & (log_buf_len - 1)),
++ log_buf, end & (log_buf_len - 1));
++ } else
++ memcpy(__get_dynamic_array(msg),
++ log_buf + (start & (log_buf_len - 1)),
++ end - start);
++ ((char *)__get_dynamic_array(msg))[end - start] = 0;
++ ),
++
++ TP_printk("%s", __get_str(msg))
++);
++#endif /* _TRACE_PRINTK_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/random.h
+@@ -0,0 +1,134 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM random
++
++#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_RANDOM_H
++
++#include <linux/writeback.h>
++#include <linux/tracepoint.h>
++
++DECLARE_EVENT_CLASS(random__mix_pool_bytes,
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, bytes )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->bytes = bytes;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("%s pool: bytes %d caller %pF",
++ __entry->pool_name, __entry->bytes, (void *)__entry->IP)
++);
++
++DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP)
++);
++
++DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
++
++ TP_ARGS(pool_name, bytes, IP)
++);
++
++TRACE_EVENT(credit_entropy_bits,
++ TP_PROTO(const char *pool_name, int bits, int entropy_count,
++ int entropy_total, unsigned long IP),
++
++ TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, bits )
++ __field( int, entropy_count )
++ __field( int, entropy_total )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->bits = bits;
++ __entry->entropy_count = entropy_count;
++ __entry->entropy_total = entropy_total;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
++ "caller %pF", __entry->pool_name, __entry->bits,
++ __entry->entropy_count, __entry->entropy_total,
++ (void *)__entry->IP)
++);
++
++TRACE_EVENT(get_random_bytes,
++ TP_PROTO(int nbytes, unsigned long IP),
++
++ TP_ARGS(nbytes, IP),
++
++ TP_STRUCT__entry(
++ __field( int, nbytes )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->nbytes = nbytes;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
++);
++
++DECLARE_EVENT_CLASS(random__extract_entropy,
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP),
++
++ TP_STRUCT__entry(
++ __field( const char *, pool_name )
++ __field( int, nbytes )
++ __field( int, entropy_count )
++ __field(unsigned long, IP )
++ ),
++
++ TP_fast_assign(
++ __entry->pool_name = pool_name;
++ __entry->nbytes = nbytes;
++ __entry->entropy_count = entropy_count;
++ __entry->IP = IP;
++ ),
++
++ TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
++ __entry->pool_name, __entry->nbytes, __entry->entropy_count,
++ (void *)__entry->IP)
++);
++
++
++DEFINE_EVENT(random__extract_entropy, extract_entropy,
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP)
++);
++
++DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
++ unsigned long IP),
++
++ TP_ARGS(pool_name, nbytes, entropy_count, IP)
++);
++
++
++
++#endif /* _TRACE_RANDOM_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/rcu.h
+@@ -0,0 +1,618 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM rcu
++
++#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_RCU_H
++
++#include <linux/tracepoint.h>
++
++/*
++ * Tracepoint for start/end markers used for utilization calculations.
++ * By convention, the string is of the following forms:
++ *
++ * "Start <activity>" -- Mark the start of the specified activity,
++ * such as "context switch". Nesting is permitted.
++ * "End <activity>" -- Mark the end of the specified activity.
++ *
++ * An "@" character within "<activity>" is a comment character: Data
++ * reduction scripts will ignore the "@" and the remainder of the line.
++ */
++TRACE_EVENT(rcu_utilization,
++
++ TP_PROTO(char *s),
++
++ TP_ARGS(s),
++
++ TP_STRUCT__entry(
++ __field(char *, s)
++ ),
++
++ TP_fast_assign(
++ __entry->s = s;
++ ),
++
++ TP_printk("%s", __entry->s)
++);
++
++#ifdef CONFIG_RCU_TRACE
++
++#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
++
++/*
++ * Tracepoint for grace-period events: starting and ending a grace
++ * period ("start" and "end", respectively), a CPU noting the start
++ * of a new grace period or the end of an old grace period ("cpustart"
++ * and "cpuend", respectively), a CPU passing through a quiescent
++ * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
++ * and "cpuofl", respectively), and a CPU being kicked for being too
++ * long in dyntick-idle mode ("kick").
++ */
++TRACE_EVENT(rcu_grace_period,
++
++ TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
++
++ TP_ARGS(rcuname, gpnum, gpevent),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(unsigned long, gpnum)
++ __field(char *, gpevent)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->gpnum = gpnum;
++ __entry->gpevent = gpevent;
++ ),
++
++ TP_printk("%s %lu %s",
++ __entry->rcuname, __entry->gpnum, __entry->gpevent)
++);
++
++/*
++ * Tracepoint for grace-period-initialization events. These are
++ * distinguished by the type of RCU, the new grace-period number, the
++ * rcu_node structure level, the starting and ending CPU covered by the
++ * rcu_node structure, and the mask of CPUs that will be waited for.
++ * All but the type of RCU are extracted from the rcu_node structure.
++ */
++TRACE_EVENT(rcu_grace_period_init,
++
++ TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
++ int grplo, int grphi, unsigned long qsmask),
++
++ TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(unsigned long, gpnum)
++ __field(u8, level)
++ __field(int, grplo)
++ __field(int, grphi)
++ __field(unsigned long, qsmask)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->gpnum = gpnum;
++ __entry->level = level;
++ __entry->grplo = grplo;
++ __entry->grphi = grphi;
++ __entry->qsmask = qsmask;
++ ),
++
++ TP_printk("%s %lu %u %d %d %lx",
++ __entry->rcuname, __entry->gpnum, __entry->level,
++ __entry->grplo, __entry->grphi, __entry->qsmask)
++);
++
++/*
++ * Tracepoint for tasks blocking within preemptible-RCU read-side
++ * critical sections. Track the type of RCU (which one day might
++ * include SRCU), the grace-period number that the task is blocking
++ * (the current or the next), and the task's PID.
++ */
++TRACE_EVENT(rcu_preempt_task,
++
++ TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
++
++ TP_ARGS(rcuname, pid, gpnum),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(unsigned long, gpnum)
++ __field(int, pid)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->gpnum = gpnum;
++ __entry->pid = pid;
++ ),
++
++ TP_printk("%s %lu %d",
++ __entry->rcuname, __entry->gpnum, __entry->pid)
++);
++
++/*
++ * Tracepoint for tasks that blocked within a given preemptible-RCU
++ * read-side critical section exiting that critical section. Track the
++ * type of RCU (which one day might include SRCU) and the task's PID.
++ */
++TRACE_EVENT(rcu_unlock_preempted_task,
++
++ TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
++
++ TP_ARGS(rcuname, gpnum, pid),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(unsigned long, gpnum)
++ __field(int, pid)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->gpnum = gpnum;
++ __entry->pid = pid;
++ ),
++
++ TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
++);
++
++/*
++ * Tracepoint for quiescent-state-reporting events. These are
++ * distinguished by the type of RCU, the grace-period number, the
++ * mask of quiescent lower-level entities, the rcu_node structure level,
++ * the starting and ending CPU covered by the rcu_node structure, and
++ * whether there are any blocked tasks blocking the current grace period.
++ * All but the type of RCU are extracted from the rcu_node structure.
++ */
++TRACE_EVENT(rcu_quiescent_state_report,
++
++ TP_PROTO(char *rcuname, unsigned long gpnum,
++ unsigned long mask, unsigned long qsmask,
++ u8 level, int grplo, int grphi, int gp_tasks),
++
++ TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(unsigned long, gpnum)
++ __field(unsigned long, mask)
++ __field(unsigned long, qsmask)
++ __field(u8, level)
++ __field(int, grplo)
++ __field(int, grphi)
++ __field(u8, gp_tasks)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->gpnum = gpnum;
++ __entry->mask = mask;
++ __entry->qsmask = qsmask;
++ __entry->level = level;
++ __entry->grplo = grplo;
++ __entry->grphi = grphi;
++ __entry->gp_tasks = gp_tasks;
++ ),
++
++ TP_printk("%s %lu %lx>%lx %u %d %d %u",
++ __entry->rcuname, __entry->gpnum,
++ __entry->mask, __entry->qsmask, __entry->level,
++ __entry->grplo, __entry->grphi, __entry->gp_tasks)
++);
++
++/*
++ * Tracepoint for quiescent states detected by force_quiescent_state().
++ * These trace events include the type of RCU, the grace-period number
++ * that was blocked by the CPU, the CPU itself, and the type of quiescent
++ * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
++ * or "kick" when kicking a CPU that has been in dyntick-idle mode for
++ * too long.
++ */
++TRACE_EVENT(rcu_fqs,
++
++ TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
++
++ TP_ARGS(rcuname, gpnum, cpu, qsevent),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(unsigned long, gpnum)
++ __field(int, cpu)
++ __field(char *, qsevent)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->gpnum = gpnum;
++ __entry->cpu = cpu;
++ __entry->qsevent = qsevent;
++ ),
++
++ TP_printk("%s %lu %d %s",
++ __entry->rcuname, __entry->gpnum,
++ __entry->cpu, __entry->qsevent)
++);
++
++#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */
++
++/*
++ * Tracepoint for dyntick-idle entry/exit events. These take a string
++ * as argument: "Start" for entering dyntick-idle mode, "End" for
++ * leaving it, "--=" for events moving towards idle, and "++=" for events
++ * moving away from idle. "Error on entry: not idle task" and "Error on
++ * exit: not idle task" indicate that a non-idle task is erroneously
++ * toying with the idle loop.
++ *
++ * These events also take a pair of numbers, which indicate the nesting
++ * depth before and after the event of interest. Note that task-related
++ * events use the upper bits of each number, while interrupt-related
++ * events use the lower bits.
++ */
++TRACE_EVENT(rcu_dyntick,
++
++ TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
++
++ TP_ARGS(polarity, oldnesting, newnesting),
++
++ TP_STRUCT__entry(
++ __field(char *, polarity)
++ __field(long long, oldnesting)
++ __field(long long, newnesting)
++ ),
++
++ TP_fast_assign(
++ __entry->polarity = polarity;
++ __entry->oldnesting = oldnesting;
++ __entry->newnesting = newnesting;
++ ),
++
++ TP_printk("%s %llx %llx", __entry->polarity,
++ __entry->oldnesting, __entry->newnesting)
++);
++
++/*
++ * Tracepoint for RCU preparation for idle, the goal being to get RCU
++ * processing done so that the current CPU can shut off its scheduling
++ * clock and enter dyntick-idle mode. One way to accomplish this is
++ * to drain all RCU callbacks from this CPU, and the other is to have
++ * done everything RCU requires for the current grace period. In this
++ * latter case, the CPU will be awakened at the end of the current grace
++ * period in order to process the remainder of its callbacks.
++ *
++ * These tracepoints take a string as argument:
++ *
++ * "No callbacks": Nothing to do, no callbacks on this CPU.
++ * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
++ * "Begin holdoff": Attempt failed, don't retry until next jiffy.
++ * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
++ * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
++ * "More callbacks": Still more callbacks, try again to clear them out.
++ * "Callbacks drained": All callbacks processed, off to dyntick idle!
++ * "Timer": Timer fired to cause CPU to continue processing callbacks.
++ * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
++ * "Cleanup after idle": Idle exited, timer canceled.
++ */
++TRACE_EVENT(rcu_prep_idle,
++
++ TP_PROTO(char *reason),
++
++ TP_ARGS(reason),
++
++ TP_STRUCT__entry(
++ __field(char *, reason)
++ ),
++
++ TP_fast_assign(
++ __entry->reason = reason;
++ ),
++
++ TP_printk("%s", __entry->reason)
++);
++
++/*
++ * Tracepoint for the registration of a single RCU callback function.
++ * The first argument is the type of RCU, the second argument is
++ * a pointer to the RCU callback itself, the third element is the
++ * number of lazy callbacks queued, and the fourth element is the
++ * total number of callbacks queued.
++ */
++TRACE_EVENT(rcu_callback,
++
++ TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
++ long qlen),
++
++ TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(void *, rhp)
++ __field(void *, func)
++ __field(long, qlen_lazy)
++ __field(long, qlen)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->rhp = rhp;
++ __entry->func = rhp->func;
++ __entry->qlen_lazy = qlen_lazy;
++ __entry->qlen = qlen;
++ ),
++
++ TP_printk("%s rhp=%p func=%pf %ld/%ld",
++ __entry->rcuname, __entry->rhp, __entry->func,
++ __entry->qlen_lazy, __entry->qlen)
++);
++
++/*
++ * Tracepoint for the registration of a single RCU callback of the special
++ * kfree() form. The first argument is the RCU type, the second argument
++ * is a pointer to the RCU callback, the third argument is the offset
++ * of the callback within the enclosing RCU-protected data structure,
++ * the fourth argument is the number of lazy callbacks queued, and the
++ * fifth argument is the total number of callbacks queued.
++ */
++TRACE_EVENT(rcu_kfree_callback,
++
++ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
++ long qlen_lazy, long qlen),
++
++ TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(void *, rhp)
++ __field(unsigned long, offset)
++ __field(long, qlen_lazy)
++ __field(long, qlen)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->rhp = rhp;
++ __entry->offset = offset;
++ __entry->qlen_lazy = qlen_lazy;
++ __entry->qlen = qlen;
++ ),
++
++ TP_printk("%s rhp=%p func=%ld %ld/%ld",
++ __entry->rcuname, __entry->rhp, __entry->offset,
++ __entry->qlen_lazy, __entry->qlen)
++);
++
++/*
++ * Tracepoint for marking the beginning rcu_do_batch, performed to start
++ * RCU callback invocation. The first argument is the RCU flavor,
++ * the second is the number of lazy callbacks queued, the third is
++ * the total number of callbacks queued, and the fourth argument is
++ * the current RCU-callback batch limit.
++ */
++TRACE_EVENT(rcu_batch_start,
++
++ TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
++
++ TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(long, qlen_lazy)
++ __field(long, qlen)
++ __field(int, blimit)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->qlen_lazy = qlen_lazy;
++ __entry->qlen = qlen;
++ __entry->blimit = blimit;
++ ),
++
++ TP_printk("%s CBs=%ld/%ld bl=%d",
++ __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
++ __entry->blimit)
++);
++
++/*
++ * Tracepoint for the invocation of a single RCU callback function.
++ * The first argument is the type of RCU, and the second argument is
++ * a pointer to the RCU callback itself.
++ */
++TRACE_EVENT(rcu_invoke_callback,
++
++ TP_PROTO(char *rcuname, struct rcu_head *rhp),
++
++ TP_ARGS(rcuname, rhp),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(void *, rhp)
++ __field(void *, func)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->rhp = rhp;
++ __entry->func = rhp->func;
++ ),
++
++ TP_printk("%s rhp=%p func=%pf",
++ __entry->rcuname, __entry->rhp, __entry->func)
++);
++
++/*
++ * Tracepoint for the invocation of a single RCU callback of the special
++ * kfree() form. The first argument is the RCU flavor, the second
++ * argument is a pointer to the RCU callback, and the third argument
++ * is the offset of the callback within the enclosing RCU-protected
++ * data structure.
++ */
++TRACE_EVENT(rcu_invoke_kfree_callback,
++
++ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
++
++ TP_ARGS(rcuname, rhp, offset),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(void *, rhp)
++ __field(unsigned long, offset)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->rhp = rhp;
++ __entry->offset = offset;
++ ),
++
++ TP_printk("%s rhp=%p func=%ld",
++ __entry->rcuname, __entry->rhp, __entry->offset)
++);
++
++/*
++ * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
++ * invoked. The first argument is the name of the RCU flavor,
++ * the second argument is number of callbacks actually invoked,
++ * the third argument (cb) is whether or not any of the callbacks that
++ * were ready to invoke at the beginning of this batch are still
++ * queued, the fourth argument (nr) is the return value of need_resched(),
++ * the fifth argument (iit) is 1 if the current task is the idle task,
++ * and the sixth argument (risk) is the return value from
++ * rcu_is_callbacks_kthread().
++ */
++TRACE_EVENT(rcu_batch_end,
++
++ TP_PROTO(char *rcuname, int callbacks_invoked,
++ bool cb, bool nr, bool iit, bool risk),
++
++ TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(int, callbacks_invoked)
++ __field(bool, cb)
++ __field(bool, nr)
++ __field(bool, iit)
++ __field(bool, risk)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->callbacks_invoked = callbacks_invoked;
++ __entry->cb = cb;
++ __entry->nr = nr;
++ __entry->iit = iit;
++ __entry->risk = risk;
++ ),
++
++ TP_printk("%s CBs-invoked=%d idle=%c%c%c%c",
++ __entry->rcuname, __entry->callbacks_invoked,
++ __entry->cb ? 'C' : '.',
++ __entry->nr ? 'S' : '.',
++ __entry->iit ? 'I' : '.',
++ __entry->risk ? 'R' : '.')
++);
++
++/*
++ * Tracepoint for rcutorture readers. The first argument is the name
++ * of the RCU flavor from rcutorture's viewpoint and the second argument
++ * is the callback address.
++ */
++TRACE_EVENT(rcu_torture_read,
++
++ TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
++
++ TP_ARGS(rcutorturename, rhp),
++
++ TP_STRUCT__entry(
++ __field(char *, rcutorturename)
++ __field(struct rcu_head *, rhp)
++ ),
++
++ TP_fast_assign(
++ __entry->rcutorturename = rcutorturename;
++ __entry->rhp = rhp;
++ ),
++
++ TP_printk("%s torture read %p",
++ __entry->rcutorturename, __entry->rhp)
++);
++
++/*
++ * Tracepoint for _rcu_barrier() execution. The string "s" describes
++ * the _rcu_barrier phase:
++ * "Begin": rcu_barrier_callback() started.
++ * "Check": rcu_barrier_callback() checking for piggybacking.
++ * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
++ * "Inc1": rcu_barrier_callback() piggyback check counter incremented.
++ * "Offline": rcu_barrier_callback() found offline CPU
++ * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
++ * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
++ * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
++ * "CB": An rcu_barrier_callback() invoked a callback, not the last.
++ * "LastCB": An rcu_barrier_callback() invoked the last callback.
++ * "Inc2": rcu_barrier_callback() piggyback check counter incremented.
++ * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
++ * is the count of remaining callbacks, and "done" is the piggybacking count.
++ */
++TRACE_EVENT(rcu_barrier,
++
++ TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
++
++ TP_ARGS(rcuname, s, cpu, cnt, done),
++
++ TP_STRUCT__entry(
++ __field(char *, rcuname)
++ __field(char *, s)
++ __field(int, cpu)
++ __field(int, cnt)
++ __field(unsigned long, done)
++ ),
++
++ TP_fast_assign(
++ __entry->rcuname = rcuname;
++ __entry->s = s;
++ __entry->cpu = cpu;
++ __entry->cnt = cnt;
++ __entry->done = done;
++ ),
++
++ TP_printk("%s %s cpu %d remaining %d # %lu",
++ __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
++ __entry->done)
++);
++
++#else /* #ifdef CONFIG_RCU_TRACE */
++
++#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
++#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
++ qsmask) do { } while (0)
++#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
++#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
++#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
++ grplo, grphi, gp_tasks) do { } \
++ while (0)
++#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
++#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
++#define trace_rcu_prep_idle(reason) do { } while (0)
++#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
++#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
++ do { } while (0)
++#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
++ do { } while (0)
++#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
++#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
++#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
++ do { } while (0)
++#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
++#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
++
++#endif /* #else #ifdef CONFIG_RCU_TRACE */
++
++#endif /* _TRACE_RCU_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/regmap.h
+@@ -0,0 +1,181 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM regmap
++
++#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_REGMAP_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++
++struct device;
++struct regmap;
++
++/*
++ * Log register events
++ */
++DECLARE_EVENT_CLASS(regmap_reg,
++
++ TP_PROTO(struct device *dev, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(dev, reg, val),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev) )
++ __field( unsigned int, reg )
++ __field( unsigned int, val )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, dev_name(dev));
++ __entry->reg = reg;
++ __entry->val = val;
++ ),
++
++ TP_printk("%s reg=%x val=%x", __get_str(name),
++ (unsigned int)__entry->reg,
++ (unsigned int)__entry->val)
++);
++
++DEFINE_EVENT(regmap_reg, regmap_reg_write,
++
++ TP_PROTO(struct device *dev, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(dev, reg, val)
++
++);
++
++DEFINE_EVENT(regmap_reg, regmap_reg_read,
++
++ TP_PROTO(struct device *dev, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(dev, reg, val)
++
++);
++
++DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
++
++ TP_PROTO(struct device *dev, unsigned int reg,
++ unsigned int val),
++
++ TP_ARGS(dev, reg, val)
++
++);
++
++DECLARE_EVENT_CLASS(regmap_block,
++
++ TP_PROTO(struct device *dev, unsigned int reg, int count),
++
++ TP_ARGS(dev, reg, count),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev) )
++ __field( unsigned int, reg )
++ __field( int, count )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, dev_name(dev));
++ __entry->reg = reg;
++ __entry->count = count;
++ ),
++
++ TP_printk("%s reg=%x count=%d", __get_str(name),
++ (unsigned int)__entry->reg,
++ (int)__entry->count)
++);
++
++DEFINE_EVENT(regmap_block, regmap_hw_read_start,
++
++ TP_PROTO(struct device *dev, unsigned int reg, int count),
++
++ TP_ARGS(dev, reg, count)
++);
++
++DEFINE_EVENT(regmap_block, regmap_hw_read_done,
++
++ TP_PROTO(struct device *dev, unsigned int reg, int count),
++
++ TP_ARGS(dev, reg, count)
++);
++
++DEFINE_EVENT(regmap_block, regmap_hw_write_start,
++
++ TP_PROTO(struct device *dev, unsigned int reg, int count),
++
++ TP_ARGS(dev, reg, count)
++);
++
++DEFINE_EVENT(regmap_block, regmap_hw_write_done,
++
++ TP_PROTO(struct device *dev, unsigned int reg, int count),
++
++ TP_ARGS(dev, reg, count)
++);
++
++TRACE_EVENT(regcache_sync,
++
++ TP_PROTO(struct device *dev, const char *type,
++ const char *status),
++
++ TP_ARGS(dev, type, status),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev) )
++ __string( status, status )
++ __string( type, type )
++ __field( int, type )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, dev_name(dev));
++ __assign_str(status, status);
++ __assign_str(type, type);
++ ),
++
++ TP_printk("%s type=%s status=%s", __get_str(name),
++ __get_str(type), __get_str(status))
++);
++
++DECLARE_EVENT_CLASS(regmap_bool,
++
++ TP_PROTO(struct device *dev, bool flag),
++
++ TP_ARGS(dev, flag),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev) )
++ __field( int, flag )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, dev_name(dev));
++ __entry->flag = flag;
++ ),
++
++ TP_printk("%s flag=%d", __get_str(name),
++ (int)__entry->flag)
++);
++
++DEFINE_EVENT(regmap_bool, regmap_cache_only,
++
++ TP_PROTO(struct device *dev, bool flag),
++
++ TP_ARGS(dev, flag)
++
++);
++
++DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
++
++ TP_PROTO(struct device *dev, bool flag),
++
++ TP_ARGS(dev, flag)
++
++);
++
++#endif /* _TRACE_REGMAP_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/regulator.h
+@@ -0,0 +1,141 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM regulator
++
++#if !defined(_TRACE_REGULATOR_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_REGULATOR_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++
++/*
++ * Events which just log themselves and the regulator name for enable/disable
++ * type tracking.
++ */
++DECLARE_EVENT_CLASS(regulator_basic,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, name);
++ ),
++
++ TP_printk("name=%s", __get_str(name))
++
++);
++
++DEFINE_EVENT(regulator_basic, regulator_enable,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name)
++
++);
++
++DEFINE_EVENT(regulator_basic, regulator_enable_delay,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name)
++
++);
++
++DEFINE_EVENT(regulator_basic, regulator_enable_complete,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name)
++
++);
++
++DEFINE_EVENT(regulator_basic, regulator_disable,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name)
++
++);
++
++DEFINE_EVENT(regulator_basic, regulator_disable_complete,
++
++ TP_PROTO(const char *name),
++
++ TP_ARGS(name)
++
++);
++
++/*
++ * Events that take a range of numerical values, mostly for voltages
++ * and so on.
++ */
++DECLARE_EVENT_CLASS(regulator_range,
++
++ TP_PROTO(const char *name, int min, int max),
++
++ TP_ARGS(name, min, max),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ __field( int, min )
++ __field( int, max )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, name);
++ __entry->min = min;
++ __entry->max = max;
++ ),
++
++ TP_printk("name=%s (%d-%d)", __get_str(name),
++ (int)__entry->min, (int)__entry->max)
++);
++
++DEFINE_EVENT(regulator_range, regulator_set_voltage,
++
++ TP_PROTO(const char *name, int min, int max),
++
++ TP_ARGS(name, min, max)
++
++);
++
++
++/*
++ * Events that take a single value, mostly for readback and refcounts.
++ */
++DECLARE_EVENT_CLASS(regulator_value,
++
++ TP_PROTO(const char *name, unsigned int val),
++
++ TP_ARGS(name, val),
++
++ TP_STRUCT__entry(
++ __string( name, name )
++ __field( unsigned int, val )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, name);
++ __entry->val = val;
++ ),
++
++ TP_printk("name=%s, val=%u", __get_str(name),
++ (int)__entry->val)
++);
++
++DEFINE_EVENT(regulator_value, regulator_set_voltage_complete,
++
++ TP_PROTO(const char *name, unsigned int value),
++
++ TP_ARGS(name, value)
++
++);
++
++#endif /* _TRACE_POWER_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/rpm.h
+@@ -0,0 +1,100 @@
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM rpm
++
++#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_RUNTIME_POWER_H
++
++#include <linux/ktime.h>
++#include <linux/tracepoint.h>
++
++struct device;
++
++/*
++ * The rpm_internal events are used for tracing some important
++ * runtime pm internal functions.
++ */
++DECLARE_EVENT_CLASS(rpm_internal,
++
++ TP_PROTO(struct device *dev, int flags),
++
++ TP_ARGS(dev, flags),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev) )
++ __field( int, flags )
++ __field( int , usage_count )
++ __field( int , disable_depth )
++ __field( int , runtime_auto )
++ __field( int , request_pending )
++ __field( int , irq_safe )
++ __field( int , child_count )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, dev_name(dev));
++ __entry->flags = flags;
++ __entry->usage_count = atomic_read(
++ &dev->power.usage_count);
++ __entry->disable_depth = dev->power.disable_depth;
++ __entry->runtime_auto = dev->power.runtime_auto;
++ __entry->request_pending = dev->power.request_pending;
++ __entry->irq_safe = dev->power.irq_safe;
++ __entry->child_count = atomic_read(
++ &dev->power.child_count);
++ ),
++
++ TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
++ " irq-%-1d child-%d",
++ __get_str(name), __entry->flags,
++ __entry->usage_count,
++ __entry->disable_depth,
++ __entry->runtime_auto,
++ __entry->request_pending,
++ __entry->irq_safe,
++ __entry->child_count
++ )
++);
++DEFINE_EVENT(rpm_internal, rpm_suspend,
++
++ TP_PROTO(struct device *dev, int flags),
++
++ TP_ARGS(dev, flags)
++);
++DEFINE_EVENT(rpm_internal, rpm_resume,
++
++ TP_PROTO(struct device *dev, int flags),
++
++ TP_ARGS(dev, flags)
++);
++DEFINE_EVENT(rpm_internal, rpm_idle,
++
++ TP_PROTO(struct device *dev, int flags),
++
++ TP_ARGS(dev, flags)
++);
++
++TRACE_EVENT(rpm_return_int,
++ TP_PROTO(struct device *dev, unsigned long ip, int ret),
++ TP_ARGS(dev, ip, ret),
++
++ TP_STRUCT__entry(
++ __string( name, dev_name(dev))
++ __field( unsigned long, ip )
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __assign_str(name, dev_name(dev));
++ __entry->ip = ip;
++ __entry->ret = ret;
++ ),
++
++ TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
++ __entry->ret)
++);
++
++#endif /* _TRACE_RUNTIME_POWER_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/sched.h
+@@ -0,0 +1,432 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM sched
++
++#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SCHED_H
++
++#include <linux/sched.h>
++#include <linux/tracepoint.h>
++#include <linux/binfmts.h>
++
++/*
++ * Tracepoint for calling kthread_stop, performed to end a kthread:
++ */
++TRACE_EVENT(sched_kthread_stop,
++
++ TP_PROTO(struct task_struct *t),
++
++ TP_ARGS(t),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
++ __entry->pid = t->pid;
++ ),
++
++ TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
++);
++
++/*
++ * Tracepoint for the return value of the kthread stopping:
++ */
++TRACE_EVENT(sched_kthread_stop_ret,
++
++ TP_PROTO(int ret),
++
++ TP_ARGS(ret),
++
++ TP_STRUCT__entry(
++ __field( int, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->ret = ret;
++ ),
++
++ TP_printk("ret=%d", __entry->ret)
++);
++
++/*
++ * Tracepoint for waking up a task:
++ */
++DECLARE_EVENT_CLASS(sched_wakeup_template,
++
++ TP_PROTO(struct task_struct *p, int success),
++
++ TP_ARGS(p, success),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __field( int, prio )
++ __field( int, success )
++ __field( int, target_cpu )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
++ __entry->pid = p->pid;
++ __entry->prio = p->prio;
++ __entry->success = success;
++ __entry->target_cpu = task_cpu(p);
++ ),
++
++ TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
++ __entry->comm, __entry->pid, __entry->prio,
++ __entry->success, __entry->target_cpu)
++);
++
++DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
++ TP_PROTO(struct task_struct *p, int success),
++ TP_ARGS(p, success));
++
++/*
++ * Tracepoint for waking up a new task:
++ */
++DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
++ TP_PROTO(struct task_struct *p, int success),
++ TP_ARGS(p, success));
++
++#ifdef CREATE_TRACE_POINTS
++static inline long __trace_sched_switch_state(struct task_struct *p)
++{
++ long state = p->state;
++
++#ifdef CONFIG_PREEMPT
++ /*
++ * For all intents and purposes a preempted task is a running task.
++ */
++ if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
++ state = TASK_RUNNING | TASK_STATE_MAX;
++#endif
++
++ return state;
++}
++#endif
++
++/*
++ * Tracepoint for task switches, performed by the scheduler:
++ */
++TRACE_EVENT(sched_switch,
++
++ TP_PROTO(struct task_struct *prev,
++ struct task_struct *next),
++
++ TP_ARGS(prev, next),
++
++ TP_STRUCT__entry(
++ __array( char, prev_comm, TASK_COMM_LEN )
++ __field( pid_t, prev_pid )
++ __field( int, prev_prio )
++ __field( long, prev_state )
++ __array( char, next_comm, TASK_COMM_LEN )
++ __field( pid_t, next_pid )
++ __field( int, next_prio )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
++ __entry->prev_pid = prev->pid;
++ __entry->prev_prio = prev->prio;
++ __entry->prev_state = __trace_sched_switch_state(prev);
++ memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
++ __entry->next_pid = next->pid;
++ __entry->next_prio = next->prio;
++ ),
++
++ TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
++ __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
++ __entry->prev_state & (TASK_STATE_MAX-1) ?
++ __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
++ { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
++ { 16, "Z" }, { 32, "X" }, { 64, "x" },
++ { 128, "W" }) : "R",
++ __entry->prev_state & TASK_STATE_MAX ? "+" : "",
++ __entry->next_comm, __entry->next_pid, __entry->next_prio)
++);
++
++/*
++ * Tracepoint for a task being migrated:
++ */
++TRACE_EVENT(sched_migrate_task,
++
++ TP_PROTO(struct task_struct *p, int dest_cpu),
++
++ TP_ARGS(p, dest_cpu),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __field( int, prio )
++ __field( int, orig_cpu )
++ __field( int, dest_cpu )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
++ __entry->pid = p->pid;
++ __entry->prio = p->prio;
++ __entry->orig_cpu = task_cpu(p);
++ __entry->dest_cpu = dest_cpu;
++ ),
++
++ TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
++ __entry->comm, __entry->pid, __entry->prio,
++ __entry->orig_cpu, __entry->dest_cpu)
++);
++
++DECLARE_EVENT_CLASS(sched_process_template,
++
++ TP_PROTO(struct task_struct *p),
++
++ TP_ARGS(p),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __field( int, prio )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
++ __entry->pid = p->pid;
++ __entry->prio = p->prio;
++ ),
++
++ TP_printk("comm=%s pid=%d prio=%d",
++ __entry->comm, __entry->pid, __entry->prio)
++);
++
++/*
++ * Tracepoint for freeing a task:
++ */
++DEFINE_EVENT(sched_process_template, sched_process_free,
++ TP_PROTO(struct task_struct *p),
++ TP_ARGS(p));
++
++
++/*
++ * Tracepoint for a task exiting:
++ */
++DEFINE_EVENT(sched_process_template, sched_process_exit,
++ TP_PROTO(struct task_struct *p),
++ TP_ARGS(p));
++
++/*
++ * Tracepoint for waiting on task to unschedule:
++ */
++DEFINE_EVENT(sched_process_template, sched_wait_task,
++ TP_PROTO(struct task_struct *p),
++ TP_ARGS(p));
++
++/*
++ * Tracepoint for a waiting task:
++ */
++TRACE_EVENT(sched_process_wait,
++
++ TP_PROTO(struct pid *pid),
++
++ TP_ARGS(pid),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __field( int, prio )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
++ __entry->pid = pid_nr(pid);
++ __entry->prio = current->prio;
++ ),
++
++ TP_printk("comm=%s pid=%d prio=%d",
++ __entry->comm, __entry->pid, __entry->prio)
++);
++
++/*
++ * Tracepoint for do_fork:
++ */
++TRACE_EVENT(sched_process_fork,
++
++ TP_PROTO(struct task_struct *parent, struct task_struct *child),
++
++ TP_ARGS(parent, child),
++
++ TP_STRUCT__entry(
++ __array( char, parent_comm, TASK_COMM_LEN )
++ __field( pid_t, parent_pid )
++ __array( char, child_comm, TASK_COMM_LEN )
++ __field( pid_t, child_pid )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
++ __entry->parent_pid = parent->pid;
++ memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
++ __entry->child_pid = child->pid;
++ ),
++
++ TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
++ __entry->parent_comm, __entry->parent_pid,
++ __entry->child_comm, __entry->child_pid)
++);
++
++/*
++ * Tracepoint for exec:
++ */
++TRACE_EVENT(sched_process_exec,
++
++ TP_PROTO(struct task_struct *p, pid_t old_pid,
++ struct linux_binprm *bprm),
++
++ TP_ARGS(p, old_pid, bprm),
++
++ TP_STRUCT__entry(
++ __string( filename, bprm->filename )
++ __field( pid_t, pid )
++ __field( pid_t, old_pid )
++ ),
++
++ TP_fast_assign(
++ __assign_str(filename, bprm->filename);
++ __entry->pid = p->pid;
++ __entry->old_pid = old_pid;
++ ),
++
++ TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
++ __entry->pid, __entry->old_pid)
++);
++
++/*
++ * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
++ * adding sched_stat support to SCHED_FIFO/RR would be welcome.
++ */
++DECLARE_EVENT_CLASS(sched_stat_template,
++
++ TP_PROTO(struct task_struct *tsk, u64 delay),
++
++ TP_ARGS(tsk, delay),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __field( u64, delay )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
++ __entry->pid = tsk->pid;
++ __entry->delay = delay;
++ )
++ TP_perf_assign(
++ __perf_count(delay);
++ ),
++
++ TP_printk("comm=%s pid=%d delay=%Lu [ns]",
++ __entry->comm, __entry->pid,
++ (unsigned long long)__entry->delay)
++);
++
++
++/*
++ * Tracepoint for accounting wait time (time the task is runnable
++ * but not actually running due to scheduler contention).
++ */
++DEFINE_EVENT(sched_stat_template, sched_stat_wait,
++ TP_PROTO(struct task_struct *tsk, u64 delay),
++ TP_ARGS(tsk, delay));
++
++/*
++ * Tracepoint for accounting sleep time (time the task is not runnable,
++ * including iowait, see below).
++ */
++DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
++ TP_PROTO(struct task_struct *tsk, u64 delay),
++ TP_ARGS(tsk, delay));
++
++/*
++ * Tracepoint for accounting iowait time (time the task is not runnable
++ * due to waiting on IO to complete).
++ */
++DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
++ TP_PROTO(struct task_struct *tsk, u64 delay),
++ TP_ARGS(tsk, delay));
++
++/*
++ * Tracepoint for accounting blocked time (time the task is in uninterruptible).
++ */
++DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
++ TP_PROTO(struct task_struct *tsk, u64 delay),
++ TP_ARGS(tsk, delay));
++
++/*
++ * Tracepoint for accounting runtime (time the task is executing
++ * on a CPU).
++ */
++TRACE_EVENT(sched_stat_runtime,
++
++ TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
++
++ TP_ARGS(tsk, runtime, vruntime),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __field( u64, runtime )
++ __field( u64, vruntime )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
++ __entry->pid = tsk->pid;
++ __entry->runtime = runtime;
++ __entry->vruntime = vruntime;
++ )
++ TP_perf_assign(
++ __perf_count(runtime);
++ ),
++
++ TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
++ __entry->comm, __entry->pid,
++ (unsigned long long)__entry->runtime,
++ (unsigned long long)__entry->vruntime)
++);
++
++/*
++ * Tracepoint for showing priority inheritance modifying a tasks
++ * priority.
++ */
++TRACE_EVENT(sched_pi_setprio,
++
++ TP_PROTO(struct task_struct *tsk, int newprio),
++
++ TP_ARGS(tsk, newprio),
++
++ TP_STRUCT__entry(
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __field( int, oldprio )
++ __field( int, newprio )
++ ),
++
++ TP_fast_assign(
++ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
++ __entry->pid = tsk->pid;
++ __entry->oldprio = tsk->prio;
++ __entry->newprio = newprio;
++ ),
++
++ TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
++ __entry->comm, __entry->pid,
++ __entry->oldprio, __entry->newprio)
++);
++
++#endif /* _TRACE_SCHED_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/scsi.h
+@@ -0,0 +1,365 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM scsi
++
++#if !defined(_TRACE_SCSI_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SCSI_H
++
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_host.h>
++#include <linux/tracepoint.h>
++#include <linux/trace_seq.h>
++
++#define scsi_opcode_name(opcode) { opcode, #opcode }
++#define show_opcode_name(val) \
++ __print_symbolic(val, \
++ scsi_opcode_name(TEST_UNIT_READY), \
++ scsi_opcode_name(REZERO_UNIT), \
++ scsi_opcode_name(REQUEST_SENSE), \
++ scsi_opcode_name(FORMAT_UNIT), \
++ scsi_opcode_name(READ_BLOCK_LIMITS), \
++ scsi_opcode_name(REASSIGN_BLOCKS), \
++ scsi_opcode_name(INITIALIZE_ELEMENT_STATUS), \
++ scsi_opcode_name(READ_6), \
++ scsi_opcode_name(WRITE_6), \
++ scsi_opcode_name(SEEK_6), \
++ scsi_opcode_name(READ_REVERSE), \
++ scsi_opcode_name(WRITE_FILEMARKS), \
++ scsi_opcode_name(SPACE), \
++ scsi_opcode_name(INQUIRY), \
++ scsi_opcode_name(RECOVER_BUFFERED_DATA), \
++ scsi_opcode_name(MODE_SELECT), \
++ scsi_opcode_name(RESERVE), \
++ scsi_opcode_name(RELEASE), \
++ scsi_opcode_name(COPY), \
++ scsi_opcode_name(ERASE), \
++ scsi_opcode_name(MODE_SENSE), \
++ scsi_opcode_name(START_STOP), \
++ scsi_opcode_name(RECEIVE_DIAGNOSTIC), \
++ scsi_opcode_name(SEND_DIAGNOSTIC), \
++ scsi_opcode_name(ALLOW_MEDIUM_REMOVAL), \
++ scsi_opcode_name(SET_WINDOW), \
++ scsi_opcode_name(READ_CAPACITY), \
++ scsi_opcode_name(READ_10), \
++ scsi_opcode_name(WRITE_10), \
++ scsi_opcode_name(SEEK_10), \
++ scsi_opcode_name(POSITION_TO_ELEMENT), \
++ scsi_opcode_name(WRITE_VERIFY), \
++ scsi_opcode_name(VERIFY), \
++ scsi_opcode_name(SEARCH_HIGH), \
++ scsi_opcode_name(SEARCH_EQUAL), \
++ scsi_opcode_name(SEARCH_LOW), \
++ scsi_opcode_name(SET_LIMITS), \
++ scsi_opcode_name(PRE_FETCH), \
++ scsi_opcode_name(READ_POSITION), \
++ scsi_opcode_name(SYNCHRONIZE_CACHE), \
++ scsi_opcode_name(LOCK_UNLOCK_CACHE), \
++ scsi_opcode_name(READ_DEFECT_DATA), \
++ scsi_opcode_name(MEDIUM_SCAN), \
++ scsi_opcode_name(COMPARE), \
++ scsi_opcode_name(COPY_VERIFY), \
++ scsi_opcode_name(WRITE_BUFFER), \
++ scsi_opcode_name(READ_BUFFER), \
++ scsi_opcode_name(UPDATE_BLOCK), \
++ scsi_opcode_name(READ_LONG), \
++ scsi_opcode_name(WRITE_LONG), \
++ scsi_opcode_name(CHANGE_DEFINITION), \
++ scsi_opcode_name(WRITE_SAME), \
++ scsi_opcode_name(UNMAP), \
++ scsi_opcode_name(READ_TOC), \
++ scsi_opcode_name(LOG_SELECT), \
++ scsi_opcode_name(LOG_SENSE), \
++ scsi_opcode_name(XDWRITEREAD_10), \
++ scsi_opcode_name(MODE_SELECT_10), \
++ scsi_opcode_name(RESERVE_10), \
++ scsi_opcode_name(RELEASE_10), \
++ scsi_opcode_name(MODE_SENSE_10), \
++ scsi_opcode_name(PERSISTENT_RESERVE_IN), \
++ scsi_opcode_name(PERSISTENT_RESERVE_OUT), \
++ scsi_opcode_name(VARIABLE_LENGTH_CMD), \
++ scsi_opcode_name(REPORT_LUNS), \
++ scsi_opcode_name(MAINTENANCE_IN), \
++ scsi_opcode_name(MAINTENANCE_OUT), \
++ scsi_opcode_name(MOVE_MEDIUM), \
++ scsi_opcode_name(EXCHANGE_MEDIUM), \
++ scsi_opcode_name(READ_12), \
++ scsi_opcode_name(WRITE_12), \
++ scsi_opcode_name(WRITE_VERIFY_12), \
++ scsi_opcode_name(SEARCH_HIGH_12), \
++ scsi_opcode_name(SEARCH_EQUAL_12), \
++ scsi_opcode_name(SEARCH_LOW_12), \
++ scsi_opcode_name(READ_ELEMENT_STATUS), \
++ scsi_opcode_name(SEND_VOLUME_TAG), \
++ scsi_opcode_name(WRITE_LONG_2), \
++ scsi_opcode_name(READ_16), \
++ scsi_opcode_name(WRITE_16), \
++ scsi_opcode_name(VERIFY_16), \
++ scsi_opcode_name(WRITE_SAME_16), \
++ scsi_opcode_name(SERVICE_ACTION_IN), \
++ scsi_opcode_name(SAI_READ_CAPACITY_16), \
++ scsi_opcode_name(SAI_GET_LBA_STATUS), \
++ scsi_opcode_name(MI_REPORT_TARGET_PGS), \
++ scsi_opcode_name(MO_SET_TARGET_PGS), \
++ scsi_opcode_name(READ_32), \
++ scsi_opcode_name(WRITE_32), \
++ scsi_opcode_name(WRITE_SAME_32), \
++ scsi_opcode_name(ATA_16), \
++ scsi_opcode_name(ATA_12))
++
++#define scsi_hostbyte_name(result) { result, #result }
++#define show_hostbyte_name(val) \
++ __print_symbolic(val, \
++ scsi_hostbyte_name(DID_OK), \
++ scsi_hostbyte_name(DID_NO_CONNECT), \
++ scsi_hostbyte_name(DID_BUS_BUSY), \
++ scsi_hostbyte_name(DID_TIME_OUT), \
++ scsi_hostbyte_name(DID_BAD_TARGET), \
++ scsi_hostbyte_name(DID_ABORT), \
++ scsi_hostbyte_name(DID_PARITY), \
++ scsi_hostbyte_name(DID_ERROR), \
++ scsi_hostbyte_name(DID_RESET), \
++ scsi_hostbyte_name(DID_BAD_INTR), \
++ scsi_hostbyte_name(DID_PASSTHROUGH), \
++ scsi_hostbyte_name(DID_SOFT_ERROR), \
++ scsi_hostbyte_name(DID_IMM_RETRY), \
++ scsi_hostbyte_name(DID_REQUEUE), \
++ scsi_hostbyte_name(DID_TRANSPORT_DISRUPTED), \
++ scsi_hostbyte_name(DID_TRANSPORT_FAILFAST))
++
++#define scsi_driverbyte_name(result) { result, #result }
++#define show_driverbyte_name(val) \
++ __print_symbolic(val, \
++ scsi_driverbyte_name(DRIVER_OK), \
++ scsi_driverbyte_name(DRIVER_BUSY), \
++ scsi_driverbyte_name(DRIVER_SOFT), \
++ scsi_driverbyte_name(DRIVER_MEDIA), \
++ scsi_driverbyte_name(DRIVER_ERROR), \
++ scsi_driverbyte_name(DRIVER_INVALID), \
++ scsi_driverbyte_name(DRIVER_TIMEOUT), \
++ scsi_driverbyte_name(DRIVER_HARD), \
++ scsi_driverbyte_name(DRIVER_SENSE))
++
++#define scsi_msgbyte_name(result) { result, #result }
++#define show_msgbyte_name(val) \
++ __print_symbolic(val, \
++ scsi_msgbyte_name(COMMAND_COMPLETE), \
++ scsi_msgbyte_name(EXTENDED_MESSAGE), \
++ scsi_msgbyte_name(SAVE_POINTERS), \
++ scsi_msgbyte_name(RESTORE_POINTERS), \
++ scsi_msgbyte_name(DISCONNECT), \
++ scsi_msgbyte_name(INITIATOR_ERROR), \
++ scsi_msgbyte_name(ABORT_TASK_SET), \
++ scsi_msgbyte_name(MESSAGE_REJECT), \
++ scsi_msgbyte_name(NOP), \
++ scsi_msgbyte_name(MSG_PARITY_ERROR), \
++ scsi_msgbyte_name(LINKED_CMD_COMPLETE), \
++ scsi_msgbyte_name(LINKED_FLG_CMD_COMPLETE), \
++ scsi_msgbyte_name(TARGET_RESET), \
++ scsi_msgbyte_name(ABORT_TASK), \
++ scsi_msgbyte_name(CLEAR_TASK_SET), \
++ scsi_msgbyte_name(INITIATE_RECOVERY), \
++ scsi_msgbyte_name(RELEASE_RECOVERY), \
++ scsi_msgbyte_name(CLEAR_ACA), \
++ scsi_msgbyte_name(LOGICAL_UNIT_RESET), \
++ scsi_msgbyte_name(SIMPLE_QUEUE_TAG), \
++ scsi_msgbyte_name(HEAD_OF_QUEUE_TAG), \
++ scsi_msgbyte_name(ORDERED_QUEUE_TAG), \
++ scsi_msgbyte_name(IGNORE_WIDE_RESIDUE), \
++ scsi_msgbyte_name(ACA), \
++ scsi_msgbyte_name(QAS_REQUEST), \
++ scsi_msgbyte_name(BUS_DEVICE_RESET), \
++ scsi_msgbyte_name(ABORT))
++
++#define scsi_statusbyte_name(result) { result, #result }
++#define show_statusbyte_name(val) \
++ __print_symbolic(val, \
++ scsi_statusbyte_name(SAM_STAT_GOOD), \
++ scsi_statusbyte_name(SAM_STAT_CHECK_CONDITION), \
++ scsi_statusbyte_name(SAM_STAT_CONDITION_MET), \
++ scsi_statusbyte_name(SAM_STAT_BUSY), \
++ scsi_statusbyte_name(SAM_STAT_INTERMEDIATE), \
++ scsi_statusbyte_name(SAM_STAT_INTERMEDIATE_CONDITION_MET), \
++ scsi_statusbyte_name(SAM_STAT_RESERVATION_CONFLICT), \
++ scsi_statusbyte_name(SAM_STAT_COMMAND_TERMINATED), \
++ scsi_statusbyte_name(SAM_STAT_TASK_SET_FULL), \
++ scsi_statusbyte_name(SAM_STAT_ACA_ACTIVE), \
++ scsi_statusbyte_name(SAM_STAT_TASK_ABORTED))
++
++#define scsi_prot_op_name(result) { result, #result }
++#define show_prot_op_name(val) \
++ __print_symbolic(val, \
++ scsi_prot_op_name(SCSI_PROT_NORMAL), \
++ scsi_prot_op_name(SCSI_PROT_READ_INSERT), \
++ scsi_prot_op_name(SCSI_PROT_WRITE_STRIP), \
++ scsi_prot_op_name(SCSI_PROT_READ_STRIP), \
++ scsi_prot_op_name(SCSI_PROT_WRITE_INSERT), \
++ scsi_prot_op_name(SCSI_PROT_READ_PASS), \
++ scsi_prot_op_name(SCSI_PROT_WRITE_PASS))
++
++const char *scsi_trace_parse_cdb(struct trace_seq*, unsigned char*, int);
++#define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len)
++
++TRACE_EVENT(scsi_dispatch_cmd_start,
++
++ TP_PROTO(struct scsi_cmnd *cmd),
++
++ TP_ARGS(cmd),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, host_no )
++ __field( unsigned int, channel )
++ __field( unsigned int, id )
++ __field( unsigned int, lun )
++ __field( unsigned int, opcode )
++ __field( unsigned int, cmd_len )
++ __field( unsigned int, data_sglen )
++ __field( unsigned int, prot_sglen )
++ __field( unsigned char, prot_op )
++ __dynamic_array(unsigned char, cmnd, cmd->cmd_len)
++ ),
++
++ TP_fast_assign(
++ __entry->host_no = cmd->device->host->host_no;
++ __entry->channel = cmd->device->channel;
++ __entry->id = cmd->device->id;
++ __entry->lun = cmd->device->lun;
++ __entry->opcode = cmd->cmnd[0];
++ __entry->cmd_len = cmd->cmd_len;
++ __entry->data_sglen = scsi_sg_count(cmd);
++ __entry->prot_sglen = scsi_prot_sg_count(cmd);
++ __entry->prot_op = scsi_get_prot_op(cmd);
++ memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
++ ),
++
++ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
++ " prot_op=%s cmnd=(%s %s raw=%s)",
++ __entry->host_no, __entry->channel, __entry->id,
++ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
++ show_prot_op_name(__entry->prot_op),
++ show_opcode_name(__entry->opcode),
++ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
++ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
++);
++
++TRACE_EVENT(scsi_dispatch_cmd_error,
++
++ TP_PROTO(struct scsi_cmnd *cmd, int rtn),
++
++ TP_ARGS(cmd, rtn),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, host_no )
++ __field( unsigned int, channel )
++ __field( unsigned int, id )
++ __field( unsigned int, lun )
++ __field( int, rtn )
++ __field( unsigned int, opcode )
++ __field( unsigned int, cmd_len )
++ __field( unsigned int, data_sglen )
++ __field( unsigned int, prot_sglen )
++ __field( unsigned char, prot_op )
++ __dynamic_array(unsigned char, cmnd, cmd->cmd_len)
++ ),
++
++ TP_fast_assign(
++ __entry->host_no = cmd->device->host->host_no;
++ __entry->channel = cmd->device->channel;
++ __entry->id = cmd->device->id;
++ __entry->lun = cmd->device->lun;
++ __entry->rtn = rtn;
++ __entry->opcode = cmd->cmnd[0];
++ __entry->cmd_len = cmd->cmd_len;
++ __entry->data_sglen = scsi_sg_count(cmd);
++ __entry->prot_sglen = scsi_prot_sg_count(cmd);
++ __entry->prot_op = scsi_get_prot_op(cmd);
++ memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
++ ),
++
++ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
++ " prot_op=%s cmnd=(%s %s raw=%s) rtn=%d",
++ __entry->host_no, __entry->channel, __entry->id,
++ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
++ show_prot_op_name(__entry->prot_op),
++ show_opcode_name(__entry->opcode),
++ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
++ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
++ __entry->rtn)
++);
++
++DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
++
++ TP_PROTO(struct scsi_cmnd *cmd),
++
++ TP_ARGS(cmd),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, host_no )
++ __field( unsigned int, channel )
++ __field( unsigned int, id )
++ __field( unsigned int, lun )
++ __field( int, result )
++ __field( unsigned int, opcode )
++ __field( unsigned int, cmd_len )
++ __field( unsigned int, data_sglen )
++ __field( unsigned int, prot_sglen )
++ __field( unsigned char, prot_op )
++ __dynamic_array(unsigned char, cmnd, cmd->cmd_len)
++ ),
++
++ TP_fast_assign(
++ __entry->host_no = cmd->device->host->host_no;
++ __entry->channel = cmd->device->channel;
++ __entry->id = cmd->device->id;
++ __entry->lun = cmd->device->lun;
++ __entry->result = cmd->result;
++ __entry->opcode = cmd->cmnd[0];
++ __entry->cmd_len = cmd->cmd_len;
++ __entry->data_sglen = scsi_sg_count(cmd);
++ __entry->prot_sglen = scsi_prot_sg_count(cmd);
++ __entry->prot_op = scsi_get_prot_op(cmd);
++ memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
++ ),
++
++ TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \
++ "prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \
++ "%s host=%s message=%s status=%s)",
++ __entry->host_no, __entry->channel, __entry->id,
++ __entry->lun, __entry->data_sglen, __entry->prot_sglen,
++ show_prot_op_name(__entry->prot_op),
++ show_opcode_name(__entry->opcode),
++ __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
++ __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
++ show_driverbyte_name(((__entry->result) >> 24) & 0xff),
++ show_hostbyte_name(((__entry->result) >> 16) & 0xff),
++ show_msgbyte_name(((__entry->result) >> 8) & 0xff),
++ show_statusbyte_name(__entry->result & 0xff))
++);
++
++DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done,
++ TP_PROTO(struct scsi_cmnd *cmd),
++ TP_ARGS(cmd));
++
++DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_timeout,
++ TP_PROTO(struct scsi_cmnd *cmd),
++ TP_ARGS(cmd));
++
++TRACE_EVENT(scsi_eh_wakeup,
++
++ TP_PROTO(struct Scsi_Host *shost),
++
++ TP_ARGS(shost),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, host_no )
++ ),
++
++ TP_fast_assign(
++ __entry->host_no = shost->host_no;
++ ),
++
++ TP_printk("host_no=%u", __entry->host_no)
++);
++
++#endif /* _TRACE_SCSI_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/signal.h
+@@ -0,0 +1,125 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM signal
++
++#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SIGNAL_H
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/tracepoint.h>
++
++#define TP_STORE_SIGINFO(__entry, info) \
++ do { \
++ if (info == SEND_SIG_NOINFO || \
++ info == SEND_SIG_FORCED) { \
++ __entry->errno = 0; \
++ __entry->code = SI_USER; \
++ } else if (info == SEND_SIG_PRIV) { \
++ __entry->errno = 0; \
++ __entry->code = SI_KERNEL; \
++ } else { \
++ __entry->errno = info->si_errno; \
++ __entry->code = info->si_code; \
++ } \
++ } while (0)
++
++#ifndef TRACE_HEADER_MULTI_READ
++enum {
++ TRACE_SIGNAL_DELIVERED,
++ TRACE_SIGNAL_IGNORED,
++ TRACE_SIGNAL_ALREADY_PENDING,
++ TRACE_SIGNAL_OVERFLOW_FAIL,
++ TRACE_SIGNAL_LOSE_INFO,
++};
++#endif
++
++/**
++ * signal_generate - called when a signal is generated
++ * @sig: signal number
++ * @info: pointer to struct siginfo
++ * @task: pointer to struct task_struct
++ * @group: shared or private
++ * @result: TRACE_SIGNAL_*
++ *
++ * Current process sends a 'sig' signal to 'task' process with
++ * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
++ * 'info' is not a pointer and you can't access its field. Instead,
++ * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV
++ * means that si_code is SI_KERNEL.
++ */
++TRACE_EVENT(signal_generate,
++
++ TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
++ int group, int result),
++
++ TP_ARGS(sig, info, task, group, result),
++
++ TP_STRUCT__entry(
++ __field( int, sig )
++ __field( int, errno )
++ __field( int, code )
++ __array( char, comm, TASK_COMM_LEN )
++ __field( pid_t, pid )
++ __field( int, group )
++ __field( int, result )
++ ),
++
++ TP_fast_assign(
++ __entry->sig = sig;
++ TP_STORE_SIGINFO(__entry, info);
++ memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
++ __entry->pid = task->pid;
++ __entry->group = group;
++ __entry->result = result;
++ ),
++
++ TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
++ __entry->sig, __entry->errno, __entry->code,
++ __entry->comm, __entry->pid, __entry->group,
++ __entry->result)
++);
++
++/**
++ * signal_deliver - called when a signal is delivered
++ * @sig: signal number
++ * @info: pointer to struct siginfo
++ * @ka: pointer to struct k_sigaction
++ *
++ * A 'sig' signal is delivered to current process with 'info' siginfo,
++ * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or
++ * SIG_DFL.
++ * Note that some signals reported by signal_generate tracepoint can be
++ * lost, ignored or modified (by debugger) before hitting this tracepoint.
++ * This means, this can show which signals are actually delivered, but
++ * matching generated signals and delivered signals may not be correct.
++ */
++TRACE_EVENT(signal_deliver,
++
++ TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
++
++ TP_ARGS(sig, info, ka),
++
++ TP_STRUCT__entry(
++ __field( int, sig )
++ __field( int, errno )
++ __field( int, code )
++ __field( unsigned long, sa_handler )
++ __field( unsigned long, sa_flags )
++ ),
++
++ TP_fast_assign(
++ __entry->sig = sig;
++ TP_STORE_SIGINFO(__entry, info);
++ __entry->sa_handler = (unsigned long)ka->sa.sa_handler;
++ __entry->sa_flags = ka->sa.sa_flags;
++ ),
++
++ TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx",
++ __entry->sig, __entry->errno, __entry->code,
++ __entry->sa_handler, __entry->sa_flags)
++);
++
++#endif /* _TRACE_SIGNAL_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/skb.h
+@@ -0,0 +1,75 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM skb
++
++#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SKB_H
++
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/tracepoint.h>
++
++/*
++ * Tracepoint for free an sk_buff:
++ */
++TRACE_EVENT(kfree_skb,
++
++ TP_PROTO(struct sk_buff *skb, void *location),
++
++ TP_ARGS(skb, location),
++
++ TP_STRUCT__entry(
++ __field( void *, skbaddr )
++ __field( void *, location )
++ __field( unsigned short, protocol )
++ ),
++
++ TP_fast_assign(
++ __entry->skbaddr = skb;
++ __entry->location = location;
++ __entry->protocol = ntohs(skb->protocol);
++ ),
++
++ TP_printk("skbaddr=%p protocol=%u location=%p",
++ __entry->skbaddr, __entry->protocol, __entry->location)
++);
++
++TRACE_EVENT(consume_skb,
++
++ TP_PROTO(struct sk_buff *skb),
++
++ TP_ARGS(skb),
++
++ TP_STRUCT__entry(
++ __field( void *, skbaddr )
++ ),
++
++ TP_fast_assign(
++ __entry->skbaddr = skb;
++ ),
++
++ TP_printk("skbaddr=%p", __entry->skbaddr)
++);
++
++TRACE_EVENT(skb_copy_datagram_iovec,
++
++ TP_PROTO(const struct sk_buff *skb, int len),
++
++ TP_ARGS(skb, len),
++
++ TP_STRUCT__entry(
++ __field( const void *, skbaddr )
++ __field( int, len )
++ ),
++
++ TP_fast_assign(
++ __entry->skbaddr = skb;
++ __entry->len = len;
++ ),
++
++ TP_printk("skbaddr=%p len=%d", __entry->skbaddr, __entry->len)
++);
++
++#endif /* _TRACE_SKB_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/sock.h
+@@ -0,0 +1,68 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM sock
++
++#if !defined(_TRACE_SOCK_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SOCK_H
++
++#include <net/sock.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(sock_rcvqueue_full,
++
++ TP_PROTO(struct sock *sk, struct sk_buff *skb),
++
++ TP_ARGS(sk, skb),
++
++ TP_STRUCT__entry(
++ __field(int, rmem_alloc)
++ __field(unsigned int, truesize)
++ __field(int, sk_rcvbuf)
++ ),
++
++ TP_fast_assign(
++ __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
++ __entry->truesize = skb->truesize;
++ __entry->sk_rcvbuf = sk->sk_rcvbuf;
++ ),
++
++ TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
++ __entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf)
++);
++
++TRACE_EVENT(sock_exceed_buf_limit,
++
++ TP_PROTO(struct sock *sk, struct proto *prot, long allocated),
++
++ TP_ARGS(sk, prot, allocated),
++
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ __field(long *, sysctl_mem)
++ __field(long, allocated)
++ __field(int, sysctl_rmem)
++ __field(int, rmem_alloc)
++ ),
++
++ TP_fast_assign(
++ strncpy(__entry->name, prot->name, 32);
++ __entry->sysctl_mem = prot->sysctl_mem;
++ __entry->allocated = allocated;
++ __entry->sysctl_rmem = prot->sysctl_rmem[0];
++ __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
++ ),
++
++ TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld "
++ "sysctl_rmem=%d rmem_alloc=%d",
++ __entry->name,
++ __entry->sysctl_mem[0],
++ __entry->sysctl_mem[1],
++ __entry->sysctl_mem[2],
++ __entry->allocated,
++ __entry->sysctl_rmem,
++ __entry->rmem_alloc)
++);
++
++#endif /* _TRACE_SOCK_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/sunrpc.h
+@@ -0,0 +1,177 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM sunrpc
++
++#if !defined(_TRACE_SUNRPC_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SUNRPC_H
++
++#include <linux/sunrpc/sched.h>
++#include <linux/sunrpc/clnt.h>
++#include <linux/tracepoint.h>
++
++DECLARE_EVENT_CLASS(rpc_task_status,
++
++ TP_PROTO(struct rpc_task *task),
++
++ TP_ARGS(task),
++
++ TP_STRUCT__entry(
++ __field(const struct rpc_task *, task)
++ __field(const struct rpc_clnt *, clnt)
++ __field(int, status)
++ ),
++
++ TP_fast_assign(
++ __entry->task = task;
++ __entry->clnt = task->tk_client;
++ __entry->status = task->tk_status;
++ ),
++
++ TP_printk("task:%p@%p, status %d",__entry->task, __entry->clnt, __entry->status)
++);
++
++DEFINE_EVENT(rpc_task_status, rpc_call_status,
++ TP_PROTO(struct rpc_task *task),
++
++ TP_ARGS(task)
++);
++
++DEFINE_EVENT(rpc_task_status, rpc_bind_status,
++ TP_PROTO(struct rpc_task *task),
++
++ TP_ARGS(task)
++);
++
++TRACE_EVENT(rpc_connect_status,
++ TP_PROTO(struct rpc_task *task, int status),
++
++ TP_ARGS(task, status),
++
++ TP_STRUCT__entry(
++ __field(const struct rpc_task *, task)
++ __field(const struct rpc_clnt *, clnt)
++ __field(int, status)
++ ),
++
++ TP_fast_assign(
++ __entry->task = task;
++ __entry->clnt = task->tk_client;
++ __entry->status = status;
++ ),
++
++ TP_printk("task:%p@%p, status %d",__entry->task, __entry->clnt, __entry->status)
++);
++
++DECLARE_EVENT_CLASS(rpc_task_running,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
++
++ TP_ARGS(clnt, task, action),
++
++ TP_STRUCT__entry(
++ __field(const struct rpc_clnt *, clnt)
++ __field(const struct rpc_task *, task)
++ __field(const void *, action)
++ __field(unsigned long, runstate)
++ __field(int, status)
++ __field(unsigned short, flags)
++ ),
++
++ TP_fast_assign(
++ __entry->clnt = clnt;
++ __entry->task = task;
++ __entry->action = action;
++ __entry->runstate = task->tk_runstate;
++ __entry->status = task->tk_status;
++ __entry->flags = task->tk_flags;
++ ),
++
++ TP_printk("task:%p@%p flags=%4.4x state=%4.4lx status=%d action=%pf",
++ __entry->task,
++ __entry->clnt,
++ __entry->flags,
++ __entry->runstate,
++ __entry->status,
++ __entry->action
++ )
++);
++
++DEFINE_EVENT(rpc_task_running, rpc_task_begin,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
++
++ TP_ARGS(clnt, task, action)
++
++);
++
++DEFINE_EVENT(rpc_task_running, rpc_task_run_action,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
++
++ TP_ARGS(clnt, task, action)
++
++);
++
++DEFINE_EVENT(rpc_task_running, rpc_task_complete,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
++
++ TP_ARGS(clnt, task, action)
++
++);
++
++DECLARE_EVENT_CLASS(rpc_task_queued,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
++
++ TP_ARGS(clnt, task, q),
++
++ TP_STRUCT__entry(
++ __field(const struct rpc_clnt *, clnt)
++ __field(const struct rpc_task *, task)
++ __field(unsigned long, timeout)
++ __field(unsigned long, runstate)
++ __field(int, status)
++ __field(unsigned short, flags)
++ __string(q_name, rpc_qname(q))
++ ),
++
++ TP_fast_assign(
++ __entry->clnt = clnt;
++ __entry->task = task;
++ __entry->timeout = task->tk_timeout;
++ __entry->runstate = task->tk_runstate;
++ __entry->status = task->tk_status;
++ __entry->flags = task->tk_flags;
++ __assign_str(q_name, rpc_qname(q));
++ ),
++
++ TP_printk("task:%p@%p flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
++ __entry->task,
++ __entry->clnt,
++ __entry->flags,
++ __entry->runstate,
++ __entry->status,
++ __entry->timeout,
++ __get_str(q_name)
++ )
++);
++
++DEFINE_EVENT(rpc_task_queued, rpc_task_sleep,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
++
++ TP_ARGS(clnt, task, q)
++
++);
++
++DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup,
++
++ TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
++
++ TP_ARGS(clnt, task, q)
++
++);
++
++#endif /* _TRACE_SUNRPC_H */
++
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/syscalls.h
+@@ -0,0 +1,75 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM raw_syscalls
++#define TRACE_INCLUDE_FILE syscalls
++
++#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_EVENTS_SYSCALLS_H
++
++#include <linux/tracepoint.h>
++
++#include <asm/ptrace.h>
++#include <asm/syscall.h>
++
++
++#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
++
++extern void syscall_regfunc(void);
++extern void syscall_unregfunc(void);
++
++TRACE_EVENT_FN(sys_enter,
++
++ TP_PROTO(struct pt_regs *regs, long id),
++
++ TP_ARGS(regs, id),
++
++ TP_STRUCT__entry(
++ __field( long, id )
++ __array( unsigned long, args, 6 )
++ ),
++
++ TP_fast_assign(
++ __entry->id = id;
++ syscall_get_arguments(current, regs, 0, 6, __entry->args);
++ ),
++
++ TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
++ __entry->id,
++ __entry->args[0], __entry->args[1], __entry->args[2],
++ __entry->args[3], __entry->args[4], __entry->args[5]),
++
++ syscall_regfunc, syscall_unregfunc
++);
++
++TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
++
++TRACE_EVENT_FN(sys_exit,
++
++ TP_PROTO(struct pt_regs *regs, long ret),
++
++ TP_ARGS(regs, ret),
++
++ TP_STRUCT__entry(
++ __field( long, id )
++ __field( long, ret )
++ ),
++
++ TP_fast_assign(
++ __entry->id = syscall_get_nr(current, regs);
++ __entry->ret = ret;
++ ),
++
++ TP_printk("NR %ld = %ld",
++ __entry->id, __entry->ret),
++
++ syscall_regfunc, syscall_unregfunc
++);
++
++TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY)
++
++#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
++
++#endif /* _TRACE_EVENTS_SYSCALLS_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
++
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/timer.h
+@@ -0,0 +1,329 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM timer
++
++#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_TIMER_H
++
++#include <linux/tracepoint.h>
++#include <linux/hrtimer.h>
++#include <linux/timer.h>
++
++DECLARE_EVENT_CLASS(timer_class,
++
++ TP_PROTO(struct timer_list *timer),
++
++ TP_ARGS(timer),
++
++ TP_STRUCT__entry(
++ __field( void *, timer )
++ ),
++
++ TP_fast_assign(
++ __entry->timer = timer;
++ ),
++
++ TP_printk("timer=%p", __entry->timer)
++);
++
++/**
++ * timer_init - called when the timer is initialized
++ * @timer: pointer to struct timer_list
++ */
++DEFINE_EVENT(timer_class, timer_init,
++
++ TP_PROTO(struct timer_list *timer),
++
++ TP_ARGS(timer)
++);
++
++/**
++ * timer_start - called when the timer is started
++ * @timer: pointer to struct timer_list
++ * @expires: the timer's expiry time
++ */
++TRACE_EVENT(timer_start,
++
++ TP_PROTO(struct timer_list *timer, unsigned long expires),
++
++ TP_ARGS(timer, expires),
++
++ TP_STRUCT__entry(
++ __field( void *, timer )
++ __field( void *, function )
++ __field( unsigned long, expires )
++ __field( unsigned long, now )
++ ),
++
++ TP_fast_assign(
++ __entry->timer = timer;
++ __entry->function = timer->function;
++ __entry->expires = expires;
++ __entry->now = jiffies;
++ ),
++
++ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
++ __entry->timer, __entry->function, __entry->expires,
++ (long)__entry->expires - __entry->now)
++);
++
++/**
++ * timer_expire_entry - called immediately before the timer callback
++ * @timer: pointer to struct timer_list
++ *
++ * Allows determining the timer latency.
++ */
++TRACE_EVENT(timer_expire_entry,
++
++ TP_PROTO(struct timer_list *timer),
++
++ TP_ARGS(timer),
++
++ TP_STRUCT__entry(
++ __field( void *, timer )
++ __field( unsigned long, now )
++ __field( void *, function)
++ ),
++
++ TP_fast_assign(
++ __entry->timer = timer;
++ __entry->now = jiffies;
++ __entry->function = timer->function;
++ ),
++
++ TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
++);
++
++/**
++ * timer_expire_exit - called immediately after the timer callback returns
++ * @timer: pointer to struct timer_list
++ *
++ * When used in combination with the timer_expire_entry tracepoint we can
++ * determine the runtime of the timer callback function.
++ *
++ * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
++ * be invalid. We solely track the pointer.
++ */
++DEFINE_EVENT(timer_class, timer_expire_exit,
++
++ TP_PROTO(struct timer_list *timer),
++
++ TP_ARGS(timer)
++);
++
++/**
++ * timer_cancel - called when the timer is canceled
++ * @timer: pointer to struct timer_list
++ */
++DEFINE_EVENT(timer_class, timer_cancel,
++
++ TP_PROTO(struct timer_list *timer),
++
++ TP_ARGS(timer)
++);
++
++/**
++ * hrtimer_init - called when the hrtimer is initialized
++ * @timer: pointer to struct hrtimer
++ * @clockid: the hrtimer's clock
++ * @mode: the hrtimer's mode
++ */
++TRACE_EVENT(hrtimer_init,
++
++ TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
++ enum hrtimer_mode mode),
++
++ TP_ARGS(hrtimer, clockid, mode),
++
++ TP_STRUCT__entry(
++ __field( void *, hrtimer )
++ __field( clockid_t, clockid )
++ __field( enum hrtimer_mode, mode )
++ ),
++
++ TP_fast_assign(
++ __entry->hrtimer = hrtimer;
++ __entry->clockid = clockid;
++ __entry->mode = mode;
++ ),
++
++ TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
++ __entry->clockid == CLOCK_REALTIME ?
++ "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
++ __entry->mode == HRTIMER_MODE_ABS ?
++ "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
++);
++
++/**
++ * hrtimer_start - called when the hrtimer is started
++ * @timer: pointer to struct hrtimer
++ */
++TRACE_EVENT(hrtimer_start,
++
++ TP_PROTO(struct hrtimer *hrtimer),
++
++ TP_ARGS(hrtimer),
++
++ TP_STRUCT__entry(
++ __field( void *, hrtimer )
++ __field( void *, function )
++ __field( s64, expires )
++ __field( s64, softexpires )
++ ),
++
++ TP_fast_assign(
++ __entry->hrtimer = hrtimer;
++ __entry->function = hrtimer->function;
++ __entry->expires = hrtimer_get_expires(hrtimer).tv64;
++ __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64;
++ ),
++
++ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
++ __entry->hrtimer, __entry->function,
++ (unsigned long long)ktime_to_ns((ktime_t) {
++ .tv64 = __entry->expires }),
++ (unsigned long long)ktime_to_ns((ktime_t) {
++ .tv64 = __entry->softexpires }))
++);
++
++/**
++ * hrtimer_expire_entry - called immediately before the hrtimer callback
++ * @timer: pointer to struct hrtimer
++ * @now: pointer to variable which contains the current time of the
++ * timer's base.
++ *
++ * Allows determining the timer latency.
++ */
++TRACE_EVENT(hrtimer_expire_entry,
++
++ TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
++
++ TP_ARGS(hrtimer, now),
++
++ TP_STRUCT__entry(
++ __field( void *, hrtimer )
++ __field( s64, now )
++ __field( void *, function)
++ ),
++
++ TP_fast_assign(
++ __entry->hrtimer = hrtimer;
++ __entry->now = now->tv64;
++ __entry->function = hrtimer->function;
++ ),
++
++ TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
++ (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
++ );
++
++DECLARE_EVENT_CLASS(hrtimer_class,
++
++ TP_PROTO(struct hrtimer *hrtimer),
++
++ TP_ARGS(hrtimer),
++
++ TP_STRUCT__entry(
++ __field( void *, hrtimer )
++ ),
++
++ TP_fast_assign(
++ __entry->hrtimer = hrtimer;
++ ),
++
++ TP_printk("hrtimer=%p", __entry->hrtimer)
++);
++
++/**
++ * hrtimer_expire_exit - called immediately after the hrtimer callback returns
++ * @timer: pointer to struct hrtimer
++ *
++ * When used in combination with the hrtimer_expire_entry tracepoint we can
++ * determine the runtime of the callback function.
++ */
++DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
++
++ TP_PROTO(struct hrtimer *hrtimer),
++
++ TP_ARGS(hrtimer)
++);
++
++/**
++ * hrtimer_cancel - called when the hrtimer is canceled
++ * @hrtimer: pointer to struct hrtimer
++ */
++DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
++
++ TP_PROTO(struct hrtimer *hrtimer),
++
++ TP_ARGS(hrtimer)
++);
++
++/**
++ * itimer_state - called when itimer is started or canceled
++ * @which: name of the interval timer
++ * @value: the itimer's value; the itimer is canceled if value->it_value is
++ * zero, otherwise it is started
++ * @expires: the itimer's expiry time
++ */
++TRACE_EVENT(itimer_state,
++
++ TP_PROTO(int which, const struct itimerval *const value,
++ cputime_t expires),
++
++ TP_ARGS(which, value, expires),
++
++ TP_STRUCT__entry(
++ __field( int, which )
++ __field( cputime_t, expires )
++ __field( long, value_sec )
++ __field( long, value_usec )
++ __field( long, interval_sec )
++ __field( long, interval_usec )
++ ),
++
++ TP_fast_assign(
++ __entry->which = which;
++ __entry->expires = expires;
++ __entry->value_sec = value->it_value.tv_sec;
++ __entry->value_usec = value->it_value.tv_usec;
++ __entry->interval_sec = value->it_interval.tv_sec;
++ __entry->interval_usec = value->it_interval.tv_usec;
++ ),
++
++ TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
++ __entry->which, (unsigned long long)__entry->expires,
++ __entry->value_sec, __entry->value_usec,
++ __entry->interval_sec, __entry->interval_usec)
++);
++
++/**
++ * itimer_expire - called when itimer expires
++ * @which: type of the interval timer
++ * @pid: pid of the process which owns the timer
++ * @now: current time, used to calculate the latency of itimer
++ */
++TRACE_EVENT(itimer_expire,
++
++ TP_PROTO(int which, struct pid *pid, cputime_t now),
++
++ TP_ARGS(which, pid, now),
++
++ TP_STRUCT__entry(
++ __field( int , which )
++ __field( pid_t, pid )
++ __field( cputime_t, now )
++ ),
++
++ TP_fast_assign(
++ __entry->which = which;
++ __entry->now = now;
++ __entry->pid = pid_nr(pid);
++ ),
++
++ TP_printk("which=%d pid=%d now=%llu", __entry->which,
++ (int) __entry->pid, (unsigned long long)__entry->now)
++);
++
++#endif /* _TRACE_TIMER_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/udp.h
+@@ -0,0 +1,32 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM udp
++
++#if !defined(_TRACE_UDP_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_UDP_H
++
++#include <linux/udp.h>
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(udp_fail_queue_rcv_skb,
++
++ TP_PROTO(int rc, struct sock *sk),
++
++ TP_ARGS(rc, sk),
++
++ TP_STRUCT__entry(
++ __field(int, rc)
++ __field(__u16, lport)
++ ),
++
++ TP_fast_assign(
++ __entry->rc = rc;
++ __entry->lport = inet_sk(sk)->inet_num;
++ ),
++
++ TP_printk("rc=%d port=%hu", __entry->rc, __entry->lport)
++);
++
++#endif /* _TRACE_UDP_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/vmscan.h
+@@ -0,0 +1,383 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM vmscan
++
++#if !defined(_TRACE_VMSCAN_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_VMSCAN_H
++
++#include <linux/types.h>
++#include <linux/tracepoint.h>
++#include <linux/mm.h>
++#include <linux/memcontrol.h>
++#include <trace/events/gfpflags.h>
++
++#define RECLAIM_WB_ANON 0x0001u
++#define RECLAIM_WB_FILE 0x0002u
++#define RECLAIM_WB_MIXED 0x0010u
++#define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */
++#define RECLAIM_WB_ASYNC 0x0008u
++
++#define show_reclaim_flags(flags) \
++ (flags) ? __print_flags(flags, "|", \
++ {RECLAIM_WB_ANON, "RECLAIM_WB_ANON"}, \
++ {RECLAIM_WB_FILE, "RECLAIM_WB_FILE"}, \
++ {RECLAIM_WB_MIXED, "RECLAIM_WB_MIXED"}, \
++ {RECLAIM_WB_SYNC, "RECLAIM_WB_SYNC"}, \
++ {RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
++ ) : "RECLAIM_WB_NONE"
++
++#define trace_reclaim_flags(page) ( \
++ (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
++ (RECLAIM_WB_ASYNC) \
++ )
++
++#define trace_shrink_flags(file) \
++ ( \
++ (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
++ (RECLAIM_WB_ASYNC) \
++ )
++
++TRACE_EVENT(mm_vmscan_kswapd_sleep,
++
++ TP_PROTO(int nid),
++
++ TP_ARGS(nid),
++
++ TP_STRUCT__entry(
++ __field( int, nid )
++ ),
++
++ TP_fast_assign(
++ __entry->nid = nid;
++ ),
++
++ TP_printk("nid=%d", __entry->nid)
++);
++
++TRACE_EVENT(mm_vmscan_kswapd_wake,
++
++ TP_PROTO(int nid, int order),
++
++ TP_ARGS(nid, order),
++
++ TP_STRUCT__entry(
++ __field( int, nid )
++ __field( int, order )
++ ),
++
++ TP_fast_assign(
++ __entry->nid = nid;
++ __entry->order = order;
++ ),
++
++ TP_printk("nid=%d order=%d", __entry->nid, __entry->order)
++);
++
++TRACE_EVENT(mm_vmscan_wakeup_kswapd,
++
++ TP_PROTO(int nid, int zid, int order),
++
++ TP_ARGS(nid, zid, order),
++
++ TP_STRUCT__entry(
++ __field( int, nid )
++ __field( int, zid )
++ __field( int, order )
++ ),
++
++ TP_fast_assign(
++ __entry->nid = nid;
++ __entry->zid = zid;
++ __entry->order = order;
++ ),
++
++ TP_printk("nid=%d zid=%d order=%d",
++ __entry->nid,
++ __entry->zid,
++ __entry->order)
++);
++
++DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
++
++ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
++
++ TP_ARGS(order, may_writepage, gfp_flags),
++
++ TP_STRUCT__entry(
++ __field( int, order )
++ __field( int, may_writepage )
++ __field( gfp_t, gfp_flags )
++ ),
++
++ TP_fast_assign(
++ __entry->order = order;
++ __entry->may_writepage = may_writepage;
++ __entry->gfp_flags = gfp_flags;
++ ),
++
++ TP_printk("order=%d may_writepage=%d gfp_flags=%s",
++ __entry->order,
++ __entry->may_writepage,
++ show_gfp_flags(__entry->gfp_flags))
++);
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin,
++
++ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
++
++ TP_ARGS(order, may_writepage, gfp_flags)
++);
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
++
++ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
++
++ TP_ARGS(order, may_writepage, gfp_flags)
++);
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
++
++ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
++
++ TP_ARGS(order, may_writepage, gfp_flags)
++);
++
++DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
++
++ TP_PROTO(unsigned long nr_reclaimed),
++
++ TP_ARGS(nr_reclaimed),
++
++ TP_STRUCT__entry(
++ __field( unsigned long, nr_reclaimed )
++ ),
++
++ TP_fast_assign(
++ __entry->nr_reclaimed = nr_reclaimed;
++ ),
++
++ TP_printk("nr_reclaimed=%lu", __entry->nr_reclaimed)
++);
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end,
++
++ TP_PROTO(unsigned long nr_reclaimed),
++
++ TP_ARGS(nr_reclaimed)
++);
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
++
++ TP_PROTO(unsigned long nr_reclaimed),
++
++ TP_ARGS(nr_reclaimed)
++);
++
++DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_reclaim_end,
++
++ TP_PROTO(unsigned long nr_reclaimed),
++
++ TP_ARGS(nr_reclaimed)
++);
++
++TRACE_EVENT(mm_shrink_slab_start,
++ TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
++ long nr_objects_to_shrink, unsigned long pgs_scanned,
++ unsigned long lru_pgs, unsigned long cache_items,
++ unsigned long long delta, unsigned long total_scan),
++
++ TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs,
++ cache_items, delta, total_scan),
++
++ TP_STRUCT__entry(
++ __field(struct shrinker *, shr)
++ __field(void *, shrink)
++ __field(long, nr_objects_to_shrink)
++ __field(gfp_t, gfp_flags)
++ __field(unsigned long, pgs_scanned)
++ __field(unsigned long, lru_pgs)
++ __field(unsigned long, cache_items)
++ __field(unsigned long long, delta)
++ __field(unsigned long, total_scan)
++ ),
++
++ TP_fast_assign(
++ __entry->shr = shr;
++ __entry->shrink = shr->shrink;
++ __entry->nr_objects_to_shrink = nr_objects_to_shrink;
++ __entry->gfp_flags = sc->gfp_mask;
++ __entry->pgs_scanned = pgs_scanned;
++ __entry->lru_pgs = lru_pgs;
++ __entry->cache_items = cache_items;
++ __entry->delta = delta;
++ __entry->total_scan = total_scan;
++ ),
++
++ TP_printk("%pF %p: objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld",
++ __entry->shrink,
++ __entry->shr,
++ __entry->nr_objects_to_shrink,
++ show_gfp_flags(__entry->gfp_flags),
++ __entry->pgs_scanned,
++ __entry->lru_pgs,
++ __entry->cache_items,
++ __entry->delta,
++ __entry->total_scan)
++);
++
++TRACE_EVENT(mm_shrink_slab_end,
++ TP_PROTO(struct shrinker *shr, int shrinker_retval,
++ long unused_scan_cnt, long new_scan_cnt),
++
++ TP_ARGS(shr, shrinker_retval, unused_scan_cnt, new_scan_cnt),
++
++ TP_STRUCT__entry(
++ __field(struct shrinker *, shr)
++ __field(void *, shrink)
++ __field(long, unused_scan)
++ __field(long, new_scan)
++ __field(int, retval)
++ __field(long, total_scan)
++ ),
++
++ TP_fast_assign(
++ __entry->shr = shr;
++ __entry->shrink = shr->shrink;
++ __entry->unused_scan = unused_scan_cnt;
++ __entry->new_scan = new_scan_cnt;
++ __entry->retval = shrinker_retval;
++ __entry->total_scan = new_scan_cnt - unused_scan_cnt;
++ ),
++
++ TP_printk("%pF %p: unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d",
++ __entry->shrink,
++ __entry->shr,
++ __entry->unused_scan,
++ __entry->new_scan,
++ __entry->total_scan,
++ __entry->retval)
++);
++
++DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
++
++ TP_PROTO(int order,
++ unsigned long nr_requested,
++ unsigned long nr_scanned,
++ unsigned long nr_taken,
++ isolate_mode_t isolate_mode,
++ int file),
++
++ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file),
++
++ TP_STRUCT__entry(
++ __field(int, order)
++ __field(unsigned long, nr_requested)
++ __field(unsigned long, nr_scanned)
++ __field(unsigned long, nr_taken)
++ __field(isolate_mode_t, isolate_mode)
++ __field(int, file)
++ ),
++
++ TP_fast_assign(
++ __entry->order = order;
++ __entry->nr_requested = nr_requested;
++ __entry->nr_scanned = nr_scanned;
++ __entry->nr_taken = nr_taken;
++ __entry->isolate_mode = isolate_mode;
++ __entry->file = file;
++ ),
++
++ TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
++ __entry->isolate_mode,
++ __entry->order,
++ __entry->nr_requested,
++ __entry->nr_scanned,
++ __entry->nr_taken,
++ __entry->file)
++);
++
++DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
++
++ TP_PROTO(int order,
++ unsigned long nr_requested,
++ unsigned long nr_scanned,
++ unsigned long nr_taken,
++ isolate_mode_t isolate_mode,
++ int file),
++
++ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
++
++);
++
++DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
++
++ TP_PROTO(int order,
++ unsigned long nr_requested,
++ unsigned long nr_scanned,
++ unsigned long nr_taken,
++ isolate_mode_t isolate_mode,
++ int file),
++
++ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
++
++);
++
++TRACE_EVENT(mm_vmscan_writepage,
++
++ TP_PROTO(struct page *page,
++ int reclaim_flags),
++
++ TP_ARGS(page, reclaim_flags),
++
++ TP_STRUCT__entry(
++ __field(struct page *, page)
++ __field(int, reclaim_flags)
++ ),
++
++ TP_fast_assign(
++ __entry->page = page;
++ __entry->reclaim_flags = reclaim_flags;
++ ),
++
++ TP_printk("page=%p pfn=%lu flags=%s",
++ __entry->page,
++ page_to_pfn(__entry->page),
++ show_reclaim_flags(__entry->reclaim_flags))
++);
++
++TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
++
++ TP_PROTO(int nid, int zid,
++ unsigned long nr_scanned, unsigned long nr_reclaimed,
++ int priority, int reclaim_flags),
++
++ TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags),
++
++ TP_STRUCT__entry(
++ __field(int, nid)
++ __field(int, zid)
++ __field(unsigned long, nr_scanned)
++ __field(unsigned long, nr_reclaimed)
++ __field(int, priority)
++ __field(int, reclaim_flags)
++ ),
++
++ TP_fast_assign(
++ __entry->nid = nid;
++ __entry->zid = zid;
++ __entry->nr_scanned = nr_scanned;
++ __entry->nr_reclaimed = nr_reclaimed;
++ __entry->priority = priority;
++ __entry->reclaim_flags = reclaim_flags;
++ ),
++
++ TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s",
++ __entry->nid, __entry->zid,
++ __entry->nr_scanned, __entry->nr_reclaimed,
++ __entry->priority,
++ show_reclaim_flags(__entry->reclaim_flags))
++);
++
++#endif /* _TRACE_VMSCAN_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/workqueue.h
+@@ -0,0 +1,121 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM workqueue
++
++#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_WORKQUEUE_H
++
++#include <linux/tracepoint.h>
++#include <linux/workqueue.h>
++
++DECLARE_EVENT_CLASS(workqueue_work,
++
++ TP_PROTO(struct work_struct *work),
++
++ TP_ARGS(work),
++
++ TP_STRUCT__entry(
++ __field( void *, work )
++ ),
++
++ TP_fast_assign(
++ __entry->work = work;
++ ),
++
++ TP_printk("work struct %p", __entry->work)
++);
++
++/**
++ * workqueue_queue_work - called when a work gets queued
++ * @req_cpu: the requested cpu
++ * @cwq: pointer to struct cpu_workqueue_struct
++ * @work: pointer to struct work_struct
++ *
++ * This event occurs when a work is queued immediately or once a
++ * delayed work is actually queued on a workqueue (ie: once the delay
++ * has been reached).
++ */
++TRACE_EVENT(workqueue_queue_work,
++
++ TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
++ struct work_struct *work),
++
++ TP_ARGS(req_cpu, cwq, work),
++
++ TP_STRUCT__entry(
++ __field( void *, work )
++ __field( void *, function)
++ __field( void *, workqueue)
++ __field( unsigned int, req_cpu )
++ __field( unsigned int, cpu )
++ ),
++
++ TP_fast_assign(
++ __entry->work = work;
++ __entry->function = work->func;
++ __entry->workqueue = cwq->wq;
++ __entry->req_cpu = req_cpu;
++ __entry->cpu = cwq->pool->gcwq->cpu;
++ ),
++
++ TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
++ __entry->work, __entry->function, __entry->workqueue,
++ __entry->req_cpu, __entry->cpu)
++);
++
++/**
++ * workqueue_activate_work - called when a work gets activated
++ * @work: pointer to struct work_struct
++ *
++ * This event occurs when a queued work is put on the active queue,
++ * which happens immediately after queueing unless @max_active limit
++ * is reached.
++ */
++DEFINE_EVENT(workqueue_work, workqueue_activate_work,
++
++ TP_PROTO(struct work_struct *work),
++
++ TP_ARGS(work)
++);
++
++/**
++ * workqueue_execute_start - called immediately before the workqueue callback
++ * @work: pointer to struct work_struct
++ *
++ * Allows tracking workqueue execution.
++ */
++TRACE_EVENT(workqueue_execute_start,
++
++ TP_PROTO(struct work_struct *work),
++
++ TP_ARGS(work),
++
++ TP_STRUCT__entry(
++ __field( void *, work )
++ __field( void *, function)
++ ),
++
++ TP_fast_assign(
++ __entry->work = work;
++ __entry->function = work->func;
++ ),
++
++ TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
++);
++
++/**
++ * workqueue_execute_end - called immediately after the workqueue callback
++ * @work: pointer to struct work_struct
++ *
++ * Allows tracking workqueue execution.
++ */
++DEFINE_EVENT(workqueue_work, workqueue_execute_end,
++
++ TP_PROTO(struct work_struct *work),
++
++ TP_ARGS(work)
++);
++
++#endif /* _TRACE_WORKQUEUE_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/events/mainline/writeback.h
+@@ -0,0 +1,492 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM writeback
++
++#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_WRITEBACK_H
++
++#include <linux/backing-dev.h>
++#include <linux/writeback.h>
++
++#define show_inode_state(state) \
++ __print_flags(state, "|", \
++ {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \
++ {I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \
++ {I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \
++ {I_NEW, "I_NEW"}, \
++ {I_WILL_FREE, "I_WILL_FREE"}, \
++ {I_FREEING, "I_FREEING"}, \
++ {I_CLEAR, "I_CLEAR"}, \
++ {I_SYNC, "I_SYNC"}, \
++ {I_REFERENCED, "I_REFERENCED"} \
++ )
++
++#define WB_WORK_REASON \
++ {WB_REASON_BACKGROUND, "background"}, \
++ {WB_REASON_TRY_TO_FREE_PAGES, "try_to_free_pages"}, \
++ {WB_REASON_SYNC, "sync"}, \
++ {WB_REASON_PERIODIC, "periodic"}, \
++ {WB_REASON_LAPTOP_TIMER, "laptop_timer"}, \
++ {WB_REASON_FREE_MORE_MEM, "free_more_memory"}, \
++ {WB_REASON_FS_FREE_SPACE, "fs_free_space"}, \
++ {WB_REASON_FORKER_THREAD, "forker_thread"}
++
++struct wb_writeback_work;
++
++DECLARE_EVENT_CLASS(writeback_work_class,
++ TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
++ TP_ARGS(bdi, work),
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ __field(long, nr_pages)
++ __field(dev_t, sb_dev)
++ __field(int, sync_mode)
++ __field(int, for_kupdate)
++ __field(int, range_cyclic)
++ __field(int, for_background)
++ __field(int, reason)
++ ),
++ TP_fast_assign(
++ struct device *dev = bdi->dev;
++ if (!dev)
++ dev = default_backing_dev_info.dev;
++ strncpy(__entry->name, dev_name(dev), 32);
++ __entry->nr_pages = work->nr_pages;
++ __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
++ __entry->sync_mode = work->sync_mode;
++ __entry->for_kupdate = work->for_kupdate;
++ __entry->range_cyclic = work->range_cyclic;
++ __entry->for_background = work->for_background;
++ __entry->reason = work->reason;
++ ),
++ TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
++ "kupdate=%d range_cyclic=%d background=%d reason=%s",
++ __entry->name,
++ MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
++ __entry->nr_pages,
++ __entry->sync_mode,
++ __entry->for_kupdate,
++ __entry->range_cyclic,
++ __entry->for_background,
++ __print_symbolic(__entry->reason, WB_WORK_REASON)
++ )
++);
++#define DEFINE_WRITEBACK_WORK_EVENT(name) \
++DEFINE_EVENT(writeback_work_class, name, \
++ TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
++ TP_ARGS(bdi, work))
++DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
++DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
++DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
++DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
++DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
++DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
++
++TRACE_EVENT(writeback_pages_written,
++ TP_PROTO(long pages_written),
++ TP_ARGS(pages_written),
++ TP_STRUCT__entry(
++ __field(long, pages)
++ ),
++ TP_fast_assign(
++ __entry->pages = pages_written;
++ ),
++ TP_printk("%ld", __entry->pages)
++);
++
++DECLARE_EVENT_CLASS(writeback_class,
++ TP_PROTO(struct backing_dev_info *bdi),
++ TP_ARGS(bdi),
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ ),
++ TP_fast_assign(
++ strncpy(__entry->name, dev_name(bdi->dev), 32);
++ ),
++ TP_printk("bdi %s",
++ __entry->name
++ )
++);
++#define DEFINE_WRITEBACK_EVENT(name) \
++DEFINE_EVENT(writeback_class, name, \
++ TP_PROTO(struct backing_dev_info *bdi), \
++ TP_ARGS(bdi))
++
++DEFINE_WRITEBACK_EVENT(writeback_nowork);
++DEFINE_WRITEBACK_EVENT(writeback_wake_background);
++DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
++DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
++DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
++DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
++DEFINE_WRITEBACK_EVENT(writeback_thread_start);
++DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
++
++DECLARE_EVENT_CLASS(wbc_class,
++ TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
++ TP_ARGS(wbc, bdi),
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ __field(long, nr_to_write)
++ __field(long, pages_skipped)
++ __field(int, sync_mode)
++ __field(int, for_kupdate)
++ __field(int, for_background)
++ __field(int, for_reclaim)
++ __field(int, range_cyclic)
++ __field(long, range_start)
++ __field(long, range_end)
++ ),
++
++ TP_fast_assign(
++ strncpy(__entry->name, dev_name(bdi->dev), 32);
++ __entry->nr_to_write = wbc->nr_to_write;
++ __entry->pages_skipped = wbc->pages_skipped;
++ __entry->sync_mode = wbc->sync_mode;
++ __entry->for_kupdate = wbc->for_kupdate;
++ __entry->for_background = wbc->for_background;
++ __entry->for_reclaim = wbc->for_reclaim;
++ __entry->range_cyclic = wbc->range_cyclic;
++ __entry->range_start = (long)wbc->range_start;
++ __entry->range_end = (long)wbc->range_end;
++ ),
++
++ TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
++ "bgrd=%d reclm=%d cyclic=%d "
++ "start=0x%lx end=0x%lx",
++ __entry->name,
++ __entry->nr_to_write,
++ __entry->pages_skipped,
++ __entry->sync_mode,
++ __entry->for_kupdate,
++ __entry->for_background,
++ __entry->for_reclaim,
++ __entry->range_cyclic,
++ __entry->range_start,
++ __entry->range_end)
++)
++
++#define DEFINE_WBC_EVENT(name) \
++DEFINE_EVENT(wbc_class, name, \
++ TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
++ TP_ARGS(wbc, bdi))
++DEFINE_WBC_EVENT(wbc_writepage);
++
++TRACE_EVENT(writeback_queue_io,
++ TP_PROTO(struct bdi_writeback *wb,
++ struct wb_writeback_work *work,
++ int moved),
++ TP_ARGS(wb, work, moved),
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ __field(unsigned long, older)
++ __field(long, age)
++ __field(int, moved)
++ __field(int, reason)
++ ),
++ TP_fast_assign(
++ unsigned long *older_than_this = work->older_than_this;
++ strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
++ __entry->older = older_than_this ? *older_than_this : 0;
++ __entry->age = older_than_this ?
++ (jiffies - *older_than_this) * 1000 / HZ : -1;
++ __entry->moved = moved;
++ __entry->reason = work->reason;
++ ),
++ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
++ __entry->name,
++ __entry->older, /* older_than_this in jiffies */
++ __entry->age, /* older_than_this in relative milliseconds */
++ __entry->moved,
++ __print_symbolic(__entry->reason, WB_WORK_REASON)
++ )
++);
++
++TRACE_EVENT(global_dirty_state,
++
++ TP_PROTO(unsigned long background_thresh,
++ unsigned long dirty_thresh
++ ),
++
++ TP_ARGS(background_thresh,
++ dirty_thresh
++ ),
++
++ TP_STRUCT__entry(
++ __field(unsigned long, nr_dirty)
++ __field(unsigned long, nr_writeback)
++ __field(unsigned long, nr_unstable)
++ __field(unsigned long, background_thresh)
++ __field(unsigned long, dirty_thresh)
++ __field(unsigned long, dirty_limit)
++ __field(unsigned long, nr_dirtied)
++ __field(unsigned long, nr_written)
++ ),
++
++ TP_fast_assign(
++ __entry->nr_dirty = global_page_state(NR_FILE_DIRTY);
++ __entry->nr_writeback = global_page_state(NR_WRITEBACK);
++ __entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
++ __entry->nr_dirtied = global_page_state(NR_DIRTIED);
++ __entry->nr_written = global_page_state(NR_WRITTEN);
++ __entry->background_thresh = background_thresh;
++ __entry->dirty_thresh = dirty_thresh;
++ __entry->dirty_limit = global_dirty_limit;
++ ),
++
++ TP_printk("dirty=%lu writeback=%lu unstable=%lu "
++ "bg_thresh=%lu thresh=%lu limit=%lu "
++ "dirtied=%lu written=%lu",
++ __entry->nr_dirty,
++ __entry->nr_writeback,
++ __entry->nr_unstable,
++ __entry->background_thresh,
++ __entry->dirty_thresh,
++ __entry->dirty_limit,
++ __entry->nr_dirtied,
++ __entry->nr_written
++ )
++);
++
++#define KBps(x) ((x) << (PAGE_SHIFT - 10))
++
++TRACE_EVENT(bdi_dirty_ratelimit,
++
++ TP_PROTO(struct backing_dev_info *bdi,
++ unsigned long dirty_rate,
++ unsigned long task_ratelimit),
++
++ TP_ARGS(bdi, dirty_rate, task_ratelimit),
++
++ TP_STRUCT__entry(
++ __array(char, bdi, 32)
++ __field(unsigned long, write_bw)
++ __field(unsigned long, avg_write_bw)
++ __field(unsigned long, dirty_rate)
++ __field(unsigned long, dirty_ratelimit)
++ __field(unsigned long, task_ratelimit)
++ __field(unsigned long, balanced_dirty_ratelimit)
++ ),
++
++ TP_fast_assign(
++ strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
++ __entry->write_bw = KBps(bdi->write_bandwidth);
++ __entry->avg_write_bw = KBps(bdi->avg_write_bandwidth);
++ __entry->dirty_rate = KBps(dirty_rate);
++ __entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
++ __entry->task_ratelimit = KBps(task_ratelimit);
++ __entry->balanced_dirty_ratelimit =
++ KBps(bdi->balanced_dirty_ratelimit);
++ ),
++
++ TP_printk("bdi %s: "
++ "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
++ "dirty_ratelimit=%lu task_ratelimit=%lu "
++ "balanced_dirty_ratelimit=%lu",
++ __entry->bdi,
++ __entry->write_bw, /* write bandwidth */
++ __entry->avg_write_bw, /* avg write bandwidth */
++ __entry->dirty_rate, /* bdi dirty rate */
++ __entry->dirty_ratelimit, /* base ratelimit */
++ __entry->task_ratelimit, /* ratelimit with position control */
++ __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
++ )
++);
++
++TRACE_EVENT(balance_dirty_pages,
++
++ TP_PROTO(struct backing_dev_info *bdi,
++ unsigned long thresh,
++ unsigned long bg_thresh,
++ unsigned long dirty,
++ unsigned long bdi_thresh,
++ unsigned long bdi_dirty,
++ unsigned long dirty_ratelimit,
++ unsigned long task_ratelimit,
++ unsigned long dirtied,
++ unsigned long period,
++ long pause,
++ unsigned long start_time),
++
++ TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
++ dirty_ratelimit, task_ratelimit,
++ dirtied, period, pause, start_time),
++
++ TP_STRUCT__entry(
++ __array( char, bdi, 32)
++ __field(unsigned long, limit)
++ __field(unsigned long, setpoint)
++ __field(unsigned long, dirty)
++ __field(unsigned long, bdi_setpoint)
++ __field(unsigned long, bdi_dirty)
++ __field(unsigned long, dirty_ratelimit)
++ __field(unsigned long, task_ratelimit)
++ __field(unsigned int, dirtied)
++ __field(unsigned int, dirtied_pause)
++ __field(unsigned long, paused)
++ __field( long, pause)
++ __field(unsigned long, period)
++ __field( long, think)
++ ),
++
++ TP_fast_assign(
++ unsigned long freerun = (thresh + bg_thresh) / 2;
++ strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
++
++ __entry->limit = global_dirty_limit;
++ __entry->setpoint = (global_dirty_limit + freerun) / 2;
++ __entry->dirty = dirty;
++ __entry->bdi_setpoint = __entry->setpoint *
++ bdi_thresh / (thresh + 1);
++ __entry->bdi_dirty = bdi_dirty;
++ __entry->dirty_ratelimit = KBps(dirty_ratelimit);
++ __entry->task_ratelimit = KBps(task_ratelimit);
++ __entry->dirtied = dirtied;
++ __entry->dirtied_pause = current->nr_dirtied_pause;
++ __entry->think = current->dirty_paused_when == 0 ? 0 :
++ (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
++ __entry->period = period * 1000 / HZ;
++ __entry->pause = pause * 1000 / HZ;
++ __entry->paused = (jiffies - start_time) * 1000 / HZ;
++ ),
++
++
++ TP_printk("bdi %s: "
++ "limit=%lu setpoint=%lu dirty=%lu "
++ "bdi_setpoint=%lu bdi_dirty=%lu "
++ "dirty_ratelimit=%lu task_ratelimit=%lu "
++ "dirtied=%u dirtied_pause=%u "
++ "paused=%lu pause=%ld period=%lu think=%ld",
++ __entry->bdi,
++ __entry->limit,
++ __entry->setpoint,
++ __entry->dirty,
++ __entry->bdi_setpoint,
++ __entry->bdi_dirty,
++ __entry->dirty_ratelimit,
++ __entry->task_ratelimit,
++ __entry->dirtied,
++ __entry->dirtied_pause,
++ __entry->paused, /* ms */
++ __entry->pause, /* ms */
++ __entry->period, /* ms */
++ __entry->think /* ms */
++ )
++);
++
++TRACE_EVENT(writeback_sb_inodes_requeue,
++
++ TP_PROTO(struct inode *inode),
++ TP_ARGS(inode),
++
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ __field(unsigned long, ino)
++ __field(unsigned long, state)
++ __field(unsigned long, dirtied_when)
++ ),
++
++ TP_fast_assign(
++ strncpy(__entry->name,
++ dev_name(inode_to_bdi(inode)->dev), 32);
++ __entry->ino = inode->i_ino;
++ __entry->state = inode->i_state;
++ __entry->dirtied_when = inode->dirtied_when;
++ ),
++
++ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
++ __entry->name,
++ __entry->ino,
++ show_inode_state(__entry->state),
++ __entry->dirtied_when,
++ (jiffies - __entry->dirtied_when) / HZ
++ )
++);
++
++DECLARE_EVENT_CLASS(writeback_congest_waited_template,
++
++ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
++
++ TP_ARGS(usec_timeout, usec_delayed),
++
++ TP_STRUCT__entry(
++ __field( unsigned int, usec_timeout )
++ __field( unsigned int, usec_delayed )
++ ),
++
++ TP_fast_assign(
++ __entry->usec_timeout = usec_timeout;
++ __entry->usec_delayed = usec_delayed;
++ ),
++
++ TP_printk("usec_timeout=%u usec_delayed=%u",
++ __entry->usec_timeout,
++ __entry->usec_delayed)
++);
++
++DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
++
++ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
++
++ TP_ARGS(usec_timeout, usec_delayed)
++);
++
++DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
++
++ TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
++
++ TP_ARGS(usec_timeout, usec_delayed)
++);
++
++DECLARE_EVENT_CLASS(writeback_single_inode_template,
++
++ TP_PROTO(struct inode *inode,
++ struct writeback_control *wbc,
++ unsigned long nr_to_write
++ ),
++
++ TP_ARGS(inode, wbc, nr_to_write),
++
++ TP_STRUCT__entry(
++ __array(char, name, 32)
++ __field(unsigned long, ino)
++ __field(unsigned long, state)
++ __field(unsigned long, dirtied_when)
++ __field(unsigned long, writeback_index)
++ __field(long, nr_to_write)
++ __field(unsigned long, wrote)
++ ),
++
++ TP_fast_assign(
++ strncpy(__entry->name,
++ dev_name(inode_to_bdi(inode)->dev), 32);
++ __entry->ino = inode->i_ino;
++ __entry->state = inode->i_state;
++ __entry->dirtied_when = inode->dirtied_when;
++ __entry->writeback_index = inode->i_mapping->writeback_index;
++ __entry->nr_to_write = nr_to_write;
++ __entry->wrote = nr_to_write - wbc->nr_to_write;
++ ),
++
++ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
++ "index=%lu to_write=%ld wrote=%lu",
++ __entry->name,
++ __entry->ino,
++ show_inode_state(__entry->state),
++ __entry->dirtied_when,
++ (jiffies - __entry->dirtied_when) / HZ,
++ __entry->writeback_index,
++ __entry->nr_to_write,
++ __entry->wrote
++ )
++);
++
++DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
++ TP_PROTO(struct inode *inode,
++ struct writeback_control *wbc,
++ unsigned long nr_to_write),
++ TP_ARGS(inode, wbc, nr_to_write)
++);
++
++#endif /* _TRACE_WRITEBACK_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/3.0.34/powerpc-32-syscalls-3.0.34
+@@ -0,0 +1,286 @@
++syscall sys_restart_syscall nr 0 nbargs 0 types: () args: ()
++syscall sys_exit nr 1 nbargs 1 types: (int) args: (error_code)
++syscall sys_read nr 3 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
++syscall sys_write nr 4 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
++syscall sys_open nr 5 nbargs 3 types: (const char *, int, int) args: (filename, flags, mode)
++syscall sys_close nr 6 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_waitpid nr 7 nbargs 3 types: (pid_t, int *, int) args: (pid, stat_addr, options)
++syscall sys_creat nr 8 nbargs 2 types: (const char *, int) args: (pathname, mode)
++syscall sys_link nr 9 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_unlink nr 10 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_chdir nr 12 nbargs 1 types: (const char *) args: (filename)
++syscall sys_time nr 13 nbargs 1 types: (time_t *) args: (tloc)
++syscall sys_mknod nr 14 nbargs 3 types: (const char *, int, unsigned) args: (filename, mode, dev)
++syscall sys_chmod nr 15 nbargs 2 types: (const char *, mode_t) args: (filename, mode)
++syscall sys_lchown nr 16 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_stat nr 18 nbargs 2 types: (const char *, struct __old_kernel_stat *) args: (filename, statbuf)
++syscall sys_lseek nr 19 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
++syscall sys_getpid nr 20 nbargs 0 types: () args: ()
++syscall sys_mount nr 21 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
++syscall sys_oldumount nr 22 nbargs 1 types: (char *) args: (name)
++syscall sys_setuid nr 23 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_getuid nr 24 nbargs 0 types: () args: ()
++syscall sys_stime nr 25 nbargs 1 types: (time_t *) args: (tptr)
++syscall sys_ptrace nr 26 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
++syscall sys_alarm nr 27 nbargs 1 types: (unsigned int) args: (seconds)
++syscall sys_fstat nr 28 nbargs 2 types: (unsigned int, struct __old_kernel_stat *) args: (fd, statbuf)
++syscall sys_pause nr 29 nbargs 0 types: () args: ()
++syscall sys_utime nr 30 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
++syscall sys_access nr 33 nbargs 2 types: (const char *, int) args: (filename, mode)
++syscall sys_nice nr 34 nbargs 1 types: (int) args: (increment)
++syscall sys_sync nr 36 nbargs 0 types: () args: ()
++syscall sys_kill nr 37 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_rename nr 38 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_mkdir nr 39 nbargs 2 types: (const char *, int) args: (pathname, mode)
++syscall sys_rmdir nr 40 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_dup nr 41 nbargs 1 types: (unsigned int) args: (fildes)
++syscall sys_pipe nr 42 nbargs 1 types: (int *) args: (fildes)
++syscall sys_times nr 43 nbargs 1 types: (struct tms *) args: (tbuf)
++syscall sys_brk nr 45 nbargs 1 types: (unsigned long) args: (brk)
++syscall sys_setgid nr 46 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_getgid nr 47 nbargs 0 types: () args: ()
++syscall sys_signal nr 48 nbargs 2 types: (int, __sighandler_t) args: (sig, handler)
++syscall sys_geteuid nr 49 nbargs 0 types: () args: ()
++syscall sys_getegid nr 50 nbargs 0 types: () args: ()
++syscall sys_acct nr 51 nbargs 1 types: (const char *) args: (name)
++syscall sys_umount nr 52 nbargs 2 types: (char *, int) args: (name, flags)
++syscall sys_ioctl nr 54 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_fcntl nr 55 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_setpgid nr 57 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
++syscall sys_olduname nr 59 nbargs 1 types: (struct oldold_utsname *) args: (name)
++syscall sys_umask nr 60 nbargs 1 types: (int) args: (mask)
++syscall sys_chroot nr 61 nbargs 1 types: (const char *) args: (filename)
++syscall sys_ustat nr 62 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
++syscall sys_dup2 nr 63 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
++syscall sys_getppid nr 64 nbargs 0 types: () args: ()
++syscall sys_getpgrp nr 65 nbargs 0 types: () args: ()
++syscall sys_setsid nr 66 nbargs 0 types: () args: ()
++syscall sys_sgetmask nr 68 nbargs 0 types: () args: ()
++syscall sys_ssetmask nr 69 nbargs 1 types: (int) args: (newmask)
++syscall sys_setreuid nr 70 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
++syscall sys_setregid nr 71 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
++syscall sys_sigpending nr 73 nbargs 1 types: (old_sigset_t *) args: (set)
++syscall sys_sethostname nr 74 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_setrlimit nr 75 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_old_getrlimit nr 76 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_getrusage nr 77 nbargs 2 types: (int, struct rusage *) args: (who, ru)
++syscall sys_gettimeofday nr 78 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_settimeofday nr 79 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_getgroups nr 80 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_setgroups nr 81 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_symlink nr 83 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_lstat nr 84 nbargs 2 types: (const char *, struct __old_kernel_stat *) args: (filename, statbuf)
++syscall sys_readlink nr 85 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
++syscall sys_uselib nr 86 nbargs 1 types: (const char *) args: (library)
++syscall sys_swapon nr 87 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
++syscall sys_reboot nr 88 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
++syscall sys_old_readdir nr 89 nbargs 3 types: (unsigned int, struct old_linux_dirent *, unsigned int) args: (fd, dirent, count)
++syscall sys_munmap nr 91 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
++syscall sys_truncate nr 92 nbargs 2 types: (const char *, long) args: (path, length)
++syscall sys_ftruncate nr 93 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
++syscall sys_fchmod nr 94 nbargs 2 types: (unsigned int, mode_t) args: (fd, mode)
++syscall sys_fchown nr 95 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
++syscall sys_getpriority nr 96 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_setpriority nr 97 nbargs 3 types: (int, int, int) args: (which, who, niceval)
++syscall sys_statfs nr 99 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
++syscall sys_fstatfs nr 100 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
++syscall sys_socketcall nr 102 nbargs 2 types: (int, unsigned long *) args: (call, args)
++syscall sys_syslog nr 103 nbargs 3 types: (int, char *, int) args: (type, buf, len)
++syscall sys_setitimer nr 104 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
++syscall sys_getitimer nr 105 nbargs 2 types: (int, struct itimerval *) args: (which, value)
++syscall sys_newstat nr 106 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_newlstat nr 107 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_newfstat nr 108 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
++syscall sys_uname nr 109 nbargs 1 types: (struct old_utsname *) args: (name)
++syscall sys_vhangup nr 111 nbargs 0 types: () args: ()
++syscall sys_wait4 nr 114 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
++syscall sys_swapoff nr 115 nbargs 1 types: (const char *) args: (specialfile)
++syscall sys_sysinfo nr 116 nbargs 1 types: (struct sysinfo *) args: (info)
++syscall sys_ipc nr 117 nbargs 6 types: (unsigned int, int, unsigned long, unsigned long, void *, long) args: (call, first, second, third, ptr, fifth)
++syscall sys_fsync nr 118 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_setdomainname nr 121 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_newuname nr 122 nbargs 1 types: (struct new_utsname *) args: (name)
++syscall sys_adjtimex nr 124 nbargs 1 types: (struct timex *) args: (txc_p)
++syscall sys_mprotect nr 125 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
++syscall sys_sigprocmask nr 126 nbargs 3 types: (int, old_sigset_t *, old_sigset_t *) args: (how, nset, oset)
++syscall sys_init_module nr 128 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
++syscall sys_delete_module nr 129 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
++syscall sys_getpgid nr 132 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_fchdir nr 133 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_bdflush nr 134 nbargs 2 types: (int, long) args: (func, data)
++syscall sys_sysfs nr 135 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
++syscall sys_personality nr 136 nbargs 1 types: (unsigned int) args: (personality)
++syscall sys_setfsuid nr 138 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_setfsgid nr 139 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_llseek nr 140 nbargs 5 types: (unsigned int, unsigned long, unsigned long, loff_t *, unsigned int) args: (fd, offset_high, offset_low, result, origin)
++syscall sys_getdents nr 141 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
++syscall sys_select nr 142 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
++syscall sys_flock nr 143 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
++syscall sys_msync nr 144 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
++syscall sys_readv nr 145 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_writev nr 146 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_getsid nr 147 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_fdatasync nr 148 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_sysctl nr 149 nbargs 1 types: (struct __sysctl_args *) args: (args)
++syscall sys_mlock nr 150 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_munlock nr 151 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_mlockall nr 152 nbargs 1 types: (int) args: (flags)
++syscall sys_munlockall nr 153 nbargs 0 types: () args: ()
++syscall sys_sched_setparam nr 154 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_getparam nr 155 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_setscheduler nr 156 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
++syscall sys_sched_getscheduler nr 157 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_sched_yield nr 158 nbargs 0 types: () args: ()
++syscall sys_sched_get_priority_max nr 159 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_get_priority_min nr 160 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_rr_get_interval nr 161 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
++syscall sys_nanosleep nr 162 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
++syscall sys_mremap nr 163 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
++syscall sys_setresuid nr 164 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
++syscall sys_getresuid nr 165 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruid, euid, suid)
++syscall sys_poll nr 167 nbargs 3 types: (struct pollfd *, unsigned int, long) args: (ufds, nfds, timeout_msecs)
++syscall sys_nfsservctl nr 168 nbargs 3 types: (int, struct nfsctl_arg *, void *) args: (cmd, arg, res)
++syscall sys_setresgid nr 169 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
++syscall sys_getresgid nr 170 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgid, egid, sgid)
++syscall sys_prctl nr 171 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
++syscall sys_rt_sigaction nr 173 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
++syscall sys_rt_sigprocmask nr 174 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
++syscall sys_rt_sigpending nr 175 nbargs 2 types: (sigset_t *, size_t) args: (set, sigsetsize)
++syscall sys_rt_sigtimedwait nr 176 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
++syscall sys_rt_sigqueueinfo nr 177 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
++syscall sys_rt_sigsuspend nr 178 nbargs 2 types: (sigset_t *, size_t) args: (unewset, sigsetsize)
++syscall sys_chown nr 181 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_getcwd nr 182 nbargs 2 types: (char *, unsigned long) args: (buf, size)
++syscall sys_capget nr 183 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
++syscall sys_capset nr 184 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
++syscall sys_sendfile nr 186 nbargs 4 types: (int, int, off_t *, size_t) args: (out_fd, in_fd, offset, count)
++syscall sys_getrlimit nr 190 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_stat64 nr 195 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
++syscall sys_lstat64 nr 196 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
++syscall sys_fstat64 nr 197 nbargs 2 types: (unsigned long, struct stat64 *) args: (fd, statbuf)
++syscall sys_pciconfig_read nr 198 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, void *) args: (bus, dfn, off, len, buf)
++syscall sys_pciconfig_write nr 199 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, void *) args: (bus, dfn, off, len, buf)
++syscall sys_getdents64 nr 202 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
++syscall sys_pivot_root nr 203 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
++syscall sys_fcntl64 nr 204 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_madvise nr 205 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
++syscall sys_mincore nr 206 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
++syscall sys_gettid nr 207 nbargs 0 types: () args: ()
++syscall sys_tkill nr 208 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_setxattr nr 209 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
++syscall sys_lsetxattr nr 210 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
++syscall sys_fsetxattr nr 211 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
++syscall sys_getxattr nr 212 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
++syscall sys_lgetxattr nr 213 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
++syscall sys_fgetxattr nr 214 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
++syscall sys_listxattr nr 215 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
++syscall sys_llistxattr nr 216 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
++syscall sys_flistxattr nr 217 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
++syscall sys_removexattr nr 218 nbargs 2 types: (const char *, const char *) args: (pathname, name)
++syscall sys_lremovexattr nr 219 nbargs 2 types: (const char *, const char *) args: (pathname, name)
++syscall sys_fremovexattr nr 220 nbargs 2 types: (int, const char *) args: (fd, name)
++syscall sys_futex nr 221 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
++syscall sys_sched_setaffinity nr 222 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
++syscall sys_sched_getaffinity nr 223 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
++syscall sys_sendfile64 nr 226 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
++syscall sys_io_setup nr 227 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
++syscall sys_io_destroy nr 228 nbargs 1 types: (aio_context_t) args: (ctx)
++syscall sys_io_getevents nr 229 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
++syscall sys_io_submit nr 230 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
++syscall sys_io_cancel nr 231 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
++syscall sys_set_tid_address nr 232 nbargs 1 types: (int *) args: (tidptr)
++syscall sys_exit_group nr 234 nbargs 1 types: (int) args: (error_code)
++syscall sys_epoll_create nr 236 nbargs 1 types: (int) args: (size)
++syscall sys_epoll_ctl nr 237 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
++syscall sys_epoll_wait nr 238 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
++syscall sys_remap_file_pages nr 239 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
++syscall sys_timer_create nr 240 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
++syscall sys_timer_settime nr 241 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
++syscall sys_timer_gettime nr 242 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
++syscall sys_timer_getoverrun nr 243 nbargs 1 types: (timer_t) args: (timer_id)
++syscall sys_timer_delete nr 244 nbargs 1 types: (timer_t) args: (timer_id)
++syscall sys_clock_settime nr 245 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
++syscall sys_clock_gettime nr 246 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
++syscall sys_clock_getres nr 247 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
++syscall sys_clock_nanosleep nr 248 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
++syscall sys_tgkill nr 250 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
++syscall sys_utimes nr 251 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
++syscall sys_statfs64 nr 252 nbargs 3 types: (const char *, size_t, struct statfs64 *) args: (pathname, sz, buf)
++syscall sys_fstatfs64 nr 253 nbargs 3 types: (unsigned int, size_t, struct statfs64 *) args: (fd, sz, buf)
++syscall sys_mq_open nr 262 nbargs 4 types: (const char *, int, mode_t, struct mq_attr *) args: (u_name, oflag, mode, u_attr)
++syscall sys_mq_unlink nr 263 nbargs 1 types: (const char *) args: (u_name)
++syscall sys_mq_timedsend nr 264 nbargs 5 types: (mqd_t, const char *, size_t, unsigned int, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout)
++syscall sys_mq_timedreceive nr 265 nbargs 5 types: (mqd_t, char *, size_t, unsigned int *, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout)
++syscall sys_mq_notify nr 266 nbargs 2 types: (mqd_t, const struct sigevent *) args: (mqdes, u_notification)
++syscall sys_mq_getsetattr nr 267 nbargs 3 types: (mqd_t, const struct mq_attr *, struct mq_attr *) args: (mqdes, u_mqstat, u_omqstat)
++syscall sys_add_key nr 269 nbargs 5 types: (const char *, const char *, const void *, size_t, key_serial_t) args: (_type, _description, _payload, plen, ringid)
++syscall sys_request_key nr 270 nbargs 4 types: (const char *, const char *, const char *, key_serial_t) args: (_type, _description, _callout_info, destringid)
++syscall sys_keyctl nr 271 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
++syscall sys_waitid nr 272 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
++syscall sys_ioprio_set nr 273 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
++syscall sys_ioprio_get nr 274 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_inotify_init nr 275 nbargs 0 types: () args: ()
++syscall sys_inotify_add_watch nr 276 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
++syscall sys_inotify_rm_watch nr 277 nbargs 2 types: (int, __s32) args: (fd, wd)
++syscall sys_pselect6 nr 280 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
++syscall sys_ppoll nr 281 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
++syscall sys_unshare nr 282 nbargs 1 types: (unsigned long) args: (unshare_flags)
++syscall sys_splice nr 283 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
++syscall sys_tee nr 284 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
++syscall sys_vmsplice nr 285 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
++syscall sys_openat nr 286 nbargs 4 types: (int, const char *, int, int) args: (dfd, filename, flags, mode)
++syscall sys_mkdirat nr 287 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, mode)
++syscall sys_mknodat nr 288 nbargs 4 types: (int, const char *, int, unsigned) args: (dfd, filename, mode, dev)
++syscall sys_fchownat nr 289 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
++syscall sys_futimesat nr 290 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
++syscall sys_fstatat64 nr 291 nbargs 4 types: (int, const char *, struct stat64 *, int) args: (dfd, filename, statbuf, flag)
++syscall sys_unlinkat nr 292 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
++syscall sys_renameat nr 293 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
++syscall sys_linkat nr 294 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
++syscall sys_symlinkat nr 295 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
++syscall sys_readlinkat nr 296 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
++syscall sys_fchmodat nr 297 nbargs 3 types: (int, const char *, mode_t) args: (dfd, filename, mode)
++syscall sys_faccessat nr 298 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
++syscall sys_get_robust_list nr 299 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
++syscall sys_set_robust_list nr 300 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
++syscall sys_getcpu nr 302 nbargs 3 types: (unsigned *, unsigned *, struct getcpu_cache *) args: (cpup, nodep, unused)
++syscall sys_epoll_pwait nr 303 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
++syscall sys_utimensat nr 304 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
++syscall sys_signalfd nr 305 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
++syscall sys_timerfd_create nr 306 nbargs 2 types: (int, int) args: (clockid, flags)
++syscall sys_eventfd nr 307 nbargs 1 types: (unsigned int) args: (count)
++syscall sys_timerfd_settime nr 311 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
++syscall sys_timerfd_gettime nr 312 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
++syscall sys_signalfd4 nr 313 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
++syscall sys_eventfd2 nr 314 nbargs 2 types: (unsigned int, int) args: (count, flags)
++syscall sys_epoll_create1 nr 315 nbargs 1 types: (int) args: (flags)
++syscall sys_dup3 nr 316 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
++syscall sys_pipe2 nr 317 nbargs 2 types: (int *, int) args: (fildes, flags)
++syscall sys_inotify_init1 nr 318 nbargs 1 types: (int) args: (flags)
++syscall sys_perf_event_open nr 319 nbargs 5 types: (struct perf_event_attr *, pid_t, int, int, unsigned long) args: (attr_uptr, pid, cpu, group_fd, flags)
++syscall sys_preadv nr 320 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
++syscall sys_pwritev nr 321 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
++syscall sys_rt_tgsigqueueinfo nr 322 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
++syscall sys_prlimit64 nr 325 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
++syscall sys_socket nr 326 nbargs 3 types: (int, int, int) args: (family, type, protocol)
++syscall sys_bind nr 327 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, umyaddr, addrlen)
++syscall sys_connect nr 328 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, uservaddr, addrlen)
++syscall sys_listen nr 329 nbargs 2 types: (int, int) args: (fd, backlog)
++syscall sys_accept nr 330 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, upeer_sockaddr, upeer_addrlen)
++syscall sys_getsockname nr 331 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
++syscall sys_getpeername nr 332 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
++syscall sys_socketpair nr 333 nbargs 4 types: (int, int, int, int *) args: (family, type, protocol, usockvec)
++syscall sys_send nr 334 nbargs 4 types: (int, void *, size_t, unsigned) args: (fd, buff, len, flags)
++syscall sys_sendto nr 335 nbargs 6 types: (int, void *, size_t, unsigned, struct sockaddr *, int) args: (fd, buff, len, flags, addr, addr_len)
++syscall sys_recvfrom nr 337 nbargs 6 types: (int, void *, size_t, unsigned, struct sockaddr *, int *) args: (fd, ubuf, size, flags, addr, addr_len)
++syscall sys_shutdown nr 338 nbargs 2 types: (int, int) args: (fd, how)
++syscall sys_setsockopt nr 339 nbargs 5 types: (int, int, int, char *, int) args: (fd, level, optname, optval, optlen)
++syscall sys_getsockopt nr 340 nbargs 5 types: (int, int, int, char *, int *) args: (fd, level, optname, optval, optlen)
++syscall sys_sendmsg nr 341 nbargs 3 types: (int, struct msghdr *, unsigned) args: (fd, msg, flags)
++syscall sys_recvmsg nr 342 nbargs 3 types: (int, struct msghdr *, unsigned int) args: (fd, msg, flags)
++syscall sys_recvmmsg nr 343 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
++syscall sys_accept4 nr 344 nbargs 4 types: (int, struct sockaddr *, int *, int) args: (fd, upeer_sockaddr, upeer_addrlen, flags)
++syscall sys_clock_adjtime nr 347 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
++syscall sys_syncfs nr 348 nbargs 1 types: (int) args: (fd)
++syscall sys_sendmmsg nr 349 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
++syscall sys_setns nr 350 nbargs 2 types: (int, int) args: (fd, nstype)
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/3.1.0-rc6/x86-32-syscalls-3.1.0-rc6
+@@ -0,0 +1,291 @@
++syscall sys_restart_syscall nr 0 nbargs 0 types: () args: ()
++syscall sys_exit nr 1 nbargs 1 types: (int) args: (error_code)
++syscall sys_read nr 3 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
++syscall sys_write nr 4 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
++syscall sys_open nr 5 nbargs 3 types: (const char *, int, int) args: (filename, flags, mode)
++syscall sys_close nr 6 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_waitpid nr 7 nbargs 3 types: (pid_t, int *, int) args: (pid, stat_addr, options)
++syscall sys_creat nr 8 nbargs 2 types: (const char *, int) args: (pathname, mode)
++syscall sys_link nr 9 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_unlink nr 10 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_chdir nr 12 nbargs 1 types: (const char *) args: (filename)
++syscall sys_time nr 13 nbargs 1 types: (time_t *) args: (tloc)
++syscall sys_mknod nr 14 nbargs 3 types: (const char *, int, unsigned) args: (filename, mode, dev)
++syscall sys_chmod nr 15 nbargs 2 types: (const char *, mode_t) args: (filename, mode)
++syscall sys_lchown16 nr 16 nbargs 3 types: (const char *, old_uid_t, old_gid_t) args: (filename, user, group)
++syscall sys_stat nr 18 nbargs 2 types: (const char *, struct __old_kernel_stat *) args: (filename, statbuf)
++syscall sys_lseek nr 19 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
++syscall sys_getpid nr 20 nbargs 0 types: () args: ()
++syscall sys_mount nr 21 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
++syscall sys_oldumount nr 22 nbargs 1 types: (char *) args: (name)
++syscall sys_setuid16 nr 23 nbargs 1 types: (old_uid_t) args: (uid)
++syscall sys_getuid16 nr 24 nbargs 0 types: () args: ()
++syscall sys_stime nr 25 nbargs 1 types: (time_t *) args: (tptr)
++syscall sys_ptrace nr 26 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
++syscall sys_alarm nr 27 nbargs 1 types: (unsigned int) args: (seconds)
++syscall sys_fstat nr 28 nbargs 2 types: (unsigned int, struct __old_kernel_stat *) args: (fd, statbuf)
++syscall sys_pause nr 29 nbargs 0 types: () args: ()
++syscall sys_utime nr 30 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
++syscall sys_access nr 33 nbargs 2 types: (const char *, int) args: (filename, mode)
++syscall sys_nice nr 34 nbargs 1 types: (int) args: (increment)
++syscall sys_sync nr 36 nbargs 0 types: () args: ()
++syscall sys_kill nr 37 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_rename nr 38 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_mkdir nr 39 nbargs 2 types: (const char *, int) args: (pathname, mode)
++syscall sys_rmdir nr 40 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_dup nr 41 nbargs 1 types: (unsigned int) args: (fildes)
++syscall sys_pipe nr 42 nbargs 1 types: (int *) args: (fildes)
++syscall sys_times nr 43 nbargs 1 types: (struct tms *) args: (tbuf)
++syscall sys_brk nr 45 nbargs 1 types: (unsigned long) args: (brk)
++syscall sys_setgid16 nr 46 nbargs 1 types: (old_gid_t) args: (gid)
++syscall sys_getgid16 nr 47 nbargs 0 types: () args: ()
++syscall sys_signal nr 48 nbargs 2 types: (int, __sighandler_t) args: (sig, handler)
++syscall sys_geteuid16 nr 49 nbargs 0 types: () args: ()
++syscall sys_getegid16 nr 50 nbargs 0 types: () args: ()
++syscall sys_acct nr 51 nbargs 1 types: (const char *) args: (name)
++syscall sys_umount nr 52 nbargs 2 types: (char *, int) args: (name, flags)
++syscall sys_ioctl nr 54 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_fcntl nr 55 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_setpgid nr 57 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
++syscall sys_olduname nr 59 nbargs 1 types: (struct oldold_utsname *) args: (name)
++syscall sys_umask nr 60 nbargs 1 types: (int) args: (mask)
++syscall sys_chroot nr 61 nbargs 1 types: (const char *) args: (filename)
++syscall sys_ustat nr 62 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
++syscall sys_dup2 nr 63 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
++syscall sys_getppid nr 64 nbargs 0 types: () args: ()
++syscall sys_getpgrp nr 65 nbargs 0 types: () args: ()
++syscall sys_setsid nr 66 nbargs 0 types: () args: ()
++syscall sys_sgetmask nr 68 nbargs 0 types: () args: ()
++syscall sys_ssetmask nr 69 nbargs 1 types: (int) args: (newmask)
++syscall sys_setreuid16 nr 70 nbargs 2 types: (old_uid_t, old_uid_t) args: (ruid, euid)
++syscall sys_setregid16 nr 71 nbargs 2 types: (old_gid_t, old_gid_t) args: (rgid, egid)
++syscall sys_sigpending nr 73 nbargs 1 types: (old_sigset_t *) args: (set)
++syscall sys_sethostname nr 74 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_setrlimit nr 75 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_old_getrlimit nr 76 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_getrusage nr 77 nbargs 2 types: (int, struct rusage *) args: (who, ru)
++syscall sys_gettimeofday nr 78 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_settimeofday nr 79 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_getgroups16 nr 80 nbargs 2 types: (int, old_gid_t *) args: (gidsetsize, grouplist)
++syscall sys_setgroups16 nr 81 nbargs 2 types: (int, old_gid_t *) args: (gidsetsize, grouplist)
++syscall sys_old_select nr 82 nbargs 1 types: (struct sel_arg_struct *) args: (arg)
++syscall sys_symlink nr 83 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_lstat nr 84 nbargs 2 types: (const char *, struct __old_kernel_stat *) args: (filename, statbuf)
++syscall sys_readlink nr 85 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
++syscall sys_uselib nr 86 nbargs 1 types: (const char *) args: (library)
++syscall sys_swapon nr 87 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
++syscall sys_reboot nr 88 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
++syscall sys_old_readdir nr 89 nbargs 3 types: (unsigned int, struct old_linux_dirent *, unsigned int) args: (fd, dirent, count)
++syscall sys_old_mmap nr 90 nbargs 1 types: (struct mmap_arg_struct *) args: (arg)
++syscall sys_munmap nr 91 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
++syscall sys_truncate nr 92 nbargs 2 types: (const char *, long) args: (path, length)
++syscall sys_ftruncate nr 93 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
++syscall sys_fchmod nr 94 nbargs 2 types: (unsigned int, mode_t) args: (fd, mode)
++syscall sys_fchown16 nr 95 nbargs 3 types: (unsigned int, old_uid_t, old_gid_t) args: (fd, user, group)
++syscall sys_getpriority nr 96 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_setpriority nr 97 nbargs 3 types: (int, int, int) args: (which, who, niceval)
++syscall sys_statfs nr 99 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
++syscall sys_fstatfs nr 100 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
++syscall sys_socketcall nr 102 nbargs 2 types: (int, unsigned long *) args: (call, args)
++syscall sys_syslog nr 103 nbargs 3 types: (int, char *, int) args: (type, buf, len)
++syscall sys_setitimer nr 104 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
++syscall sys_getitimer nr 105 nbargs 2 types: (int, struct itimerval *) args: (which, value)
++syscall sys_newstat nr 106 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_newlstat nr 107 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_newfstat nr 108 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
++syscall sys_uname nr 109 nbargs 1 types: (struct old_utsname *) args: (name)
++syscall sys_vhangup nr 111 nbargs 0 types: () args: ()
++syscall sys_wait4 nr 114 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
++syscall sys_swapoff nr 115 nbargs 1 types: (const char *) args: (specialfile)
++syscall sys_sysinfo nr 116 nbargs 1 types: (struct sysinfo *) args: (info)
++syscall sys_ipc nr 117 nbargs 6 types: (unsigned int, int, unsigned long, unsigned long, void *, long) args: (call, first, second, third, ptr, fifth)
++syscall sys_fsync nr 118 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_setdomainname nr 121 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_newuname nr 122 nbargs 1 types: (struct new_utsname *) args: (name)
++syscall sys_adjtimex nr 124 nbargs 1 types: (struct timex *) args: (txc_p)
++syscall sys_mprotect nr 125 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
++syscall sys_sigprocmask nr 126 nbargs 3 types: (int, old_sigset_t *, old_sigset_t *) args: (how, nset, oset)
++syscall sys_init_module nr 128 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
++syscall sys_delete_module nr 129 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
++syscall sys_quotactl nr 131 nbargs 4 types: (unsigned int, const char *, qid_t, void *) args: (cmd, special, id, addr)
++syscall sys_getpgid nr 132 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_fchdir nr 133 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_bdflush nr 134 nbargs 2 types: (int, long) args: (func, data)
++syscall sys_sysfs nr 135 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
++syscall sys_personality nr 136 nbargs 1 types: (unsigned int) args: (personality)
++syscall sys_setfsuid16 nr 138 nbargs 1 types: (old_uid_t) args: (uid)
++syscall sys_setfsgid16 nr 139 nbargs 1 types: (old_gid_t) args: (gid)
++syscall sys_llseek nr 140 nbargs 5 types: (unsigned int, unsigned long, unsigned long, loff_t *, unsigned int) args: (fd, offset_high, offset_low, result, origin)
++syscall sys_getdents nr 141 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
++syscall sys_select nr 142 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
++syscall sys_flock nr 143 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
++syscall sys_msync nr 144 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
++syscall sys_readv nr 145 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_writev nr 146 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_getsid nr 147 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_fdatasync nr 148 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_sysctl nr 149 nbargs 1 types: (struct __sysctl_args *) args: (args)
++syscall sys_mlock nr 150 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_munlock nr 151 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_mlockall nr 152 nbargs 1 types: (int) args: (flags)
++syscall sys_munlockall nr 153 nbargs 0 types: () args: ()
++syscall sys_sched_setparam nr 154 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_getparam nr 155 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_setscheduler nr 156 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
++syscall sys_sched_getscheduler nr 157 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_sched_yield nr 158 nbargs 0 types: () args: ()
++syscall sys_sched_get_priority_max nr 159 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_get_priority_min nr 160 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_rr_get_interval nr 161 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
++syscall sys_nanosleep nr 162 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
++syscall sys_mremap nr 163 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
++syscall sys_setresuid16 nr 164 nbargs 3 types: (old_uid_t, old_uid_t, old_uid_t) args: (ruid, euid, suid)
++syscall sys_getresuid16 nr 165 nbargs 3 types: (old_uid_t *, old_uid_t *, old_uid_t *) args: (ruid, euid, suid)
++syscall sys_poll nr 168 nbargs 3 types: (struct pollfd *, unsigned int, long) args: (ufds, nfds, timeout_msecs)
++syscall sys_setresgid16 nr 170 nbargs 3 types: (old_gid_t, old_gid_t, old_gid_t) args: (rgid, egid, sgid)
++syscall sys_getresgid16 nr 171 nbargs 3 types: (old_gid_t *, old_gid_t *, old_gid_t *) args: (rgid, egid, sgid)
++syscall sys_prctl nr 172 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
++syscall sys_rt_sigaction nr 174 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
++syscall sys_rt_sigprocmask nr 175 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
++syscall sys_rt_sigpending nr 176 nbargs 2 types: (sigset_t *, size_t) args: (set, sigsetsize)
++syscall sys_rt_sigtimedwait nr 177 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
++syscall sys_rt_sigqueueinfo nr 178 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
++syscall sys_rt_sigsuspend nr 179 nbargs 2 types: (sigset_t *, size_t) args: (unewset, sigsetsize)
++syscall sys_chown16 nr 182 nbargs 3 types: (const char *, old_uid_t, old_gid_t) args: (filename, user, group)
++syscall sys_getcwd nr 183 nbargs 2 types: (char *, unsigned long) args: (buf, size)
++syscall sys_capget nr 184 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
++syscall sys_capset nr 185 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
++syscall sys_sendfile nr 187 nbargs 4 types: (int, int, off_t *, size_t) args: (out_fd, in_fd, offset, count)
++syscall sys_getrlimit nr 191 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_mmap_pgoff nr 192 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, len, prot, flags, fd, pgoff)
++syscall sys_stat64 nr 195 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
++syscall sys_lstat64 nr 196 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
++syscall sys_fstat64 nr 197 nbargs 2 types: (unsigned long, struct stat64 *) args: (fd, statbuf)
++syscall sys_lchown nr 198 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_getuid nr 199 nbargs 0 types: () args: ()
++syscall sys_getgid nr 200 nbargs 0 types: () args: ()
++syscall sys_geteuid nr 201 nbargs 0 types: () args: ()
++syscall sys_getegid nr 202 nbargs 0 types: () args: ()
++syscall sys_setreuid nr 203 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
++syscall sys_setregid nr 204 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
++syscall sys_getgroups nr 205 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_setgroups nr 206 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_fchown nr 207 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
++syscall sys_setresuid nr 208 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
++syscall sys_getresuid nr 209 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruid, euid, suid)
++syscall sys_setresgid nr 210 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
++syscall sys_getresgid nr 211 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgid, egid, sgid)
++syscall sys_chown nr 212 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_setuid nr 213 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_setgid nr 214 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_setfsuid nr 215 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_setfsgid nr 216 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_pivot_root nr 217 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
++syscall sys_mincore nr 218 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
++syscall sys_madvise nr 219 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
++syscall sys_getdents64 nr 220 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
++syscall sys_fcntl64 nr 221 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_gettid nr 224 nbargs 0 types: () args: ()
++syscall sys_setxattr nr 226 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
++syscall sys_lsetxattr nr 227 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
++syscall sys_fsetxattr nr 228 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
++syscall sys_getxattr nr 229 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
++syscall sys_lgetxattr nr 230 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
++syscall sys_fgetxattr nr 231 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
++syscall sys_listxattr nr 232 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
++syscall sys_llistxattr nr 233 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
++syscall sys_flistxattr nr 234 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
++syscall sys_removexattr nr 235 nbargs 2 types: (const char *, const char *) args: (pathname, name)
++syscall sys_lremovexattr nr 236 nbargs 2 types: (const char *, const char *) args: (pathname, name)
++syscall sys_fremovexattr nr 237 nbargs 2 types: (int, const char *) args: (fd, name)
++syscall sys_tkill nr 238 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_sendfile64 nr 239 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
++syscall sys_futex nr 240 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
++syscall sys_sched_setaffinity nr 241 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
++syscall sys_sched_getaffinity nr 242 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
++syscall sys_io_setup nr 245 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
++syscall sys_io_destroy nr 246 nbargs 1 types: (aio_context_t) args: (ctx)
++syscall sys_io_getevents nr 247 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
++syscall sys_io_submit nr 248 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
++syscall sys_io_cancel nr 249 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
++syscall sys_exit_group nr 252 nbargs 1 types: (int) args: (error_code)
++syscall sys_epoll_create nr 254 nbargs 1 types: (int) args: (size)
++syscall sys_epoll_ctl nr 255 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
++syscall sys_epoll_wait nr 256 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
++syscall sys_remap_file_pages nr 257 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
++syscall sys_set_tid_address nr 258 nbargs 1 types: (int *) args: (tidptr)
++syscall sys_timer_create nr 259 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
++syscall sys_timer_settime nr 260 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
++syscall sys_timer_gettime nr 261 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
++syscall sys_timer_getoverrun nr 262 nbargs 1 types: (timer_t) args: (timer_id)
++syscall sys_timer_delete nr 263 nbargs 1 types: (timer_t) args: (timer_id)
++syscall sys_clock_settime nr 264 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
++syscall sys_clock_gettime nr 265 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
++syscall sys_clock_getres nr 266 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
++syscall sys_clock_nanosleep nr 267 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
++syscall sys_statfs64 nr 268 nbargs 3 types: (const char *, size_t, struct statfs64 *) args: (pathname, sz, buf)
++syscall sys_fstatfs64 nr 269 nbargs 3 types: (unsigned int, size_t, struct statfs64 *) args: (fd, sz, buf)
++syscall sys_tgkill nr 270 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
++syscall sys_utimes nr 271 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
++syscall sys_mq_open nr 277 nbargs 4 types: (const char *, int, mode_t, struct mq_attr *) args: (u_name, oflag, mode, u_attr)
++syscall sys_mq_unlink nr 278 nbargs 1 types: (const char *) args: (u_name)
++syscall sys_mq_timedsend nr 279 nbargs 5 types: (mqd_t, const char *, size_t, unsigned int, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout)
++syscall sys_mq_timedreceive nr 280 nbargs 5 types: (mqd_t, char *, size_t, unsigned int *, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout)
++syscall sys_mq_notify nr 281 nbargs 2 types: (mqd_t, const struct sigevent *) args: (mqdes, u_notification)
++syscall sys_mq_getsetattr nr 282 nbargs 3 types: (mqd_t, const struct mq_attr *, struct mq_attr *) args: (mqdes, u_mqstat, u_omqstat)
++syscall sys_kexec_load nr 283 nbargs 4 types: (unsigned long, unsigned long, struct kexec_segment *, unsigned long) args: (entry, nr_segments, segments, flags)
++syscall sys_waitid nr 284 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
++syscall sys_add_key nr 286 nbargs 5 types: (const char *, const char *, const void *, size_t, key_serial_t) args: (_type, _description, _payload, plen, ringid)
++syscall sys_request_key nr 287 nbargs 4 types: (const char *, const char *, const char *, key_serial_t) args: (_type, _description, _callout_info, destringid)
++syscall sys_keyctl nr 288 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
++syscall sys_ioprio_set nr 289 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
++syscall sys_ioprio_get nr 290 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_inotify_init nr 291 nbargs 0 types: () args: ()
++syscall sys_inotify_add_watch nr 292 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
++syscall sys_inotify_rm_watch nr 293 nbargs 2 types: (int, __s32) args: (fd, wd)
++syscall sys_openat nr 295 nbargs 4 types: (int, const char *, int, int) args: (dfd, filename, flags, mode)
++syscall sys_mkdirat nr 296 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, mode)
++syscall sys_mknodat nr 297 nbargs 4 types: (int, const char *, int, unsigned) args: (dfd, filename, mode, dev)
++syscall sys_fchownat nr 298 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
++syscall sys_futimesat nr 299 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
++syscall sys_fstatat64 nr 300 nbargs 4 types: (int, const char *, struct stat64 *, int) args: (dfd, filename, statbuf, flag)
++syscall sys_unlinkat nr 301 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
++syscall sys_renameat nr 302 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
++syscall sys_linkat nr 303 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
++syscall sys_symlinkat nr 304 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
++syscall sys_readlinkat nr 305 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
++syscall sys_fchmodat nr 306 nbargs 3 types: (int, const char *, mode_t) args: (dfd, filename, mode)
++syscall sys_faccessat nr 307 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
++syscall sys_pselect6 nr 308 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
++syscall sys_ppoll nr 309 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
++syscall sys_unshare nr 310 nbargs 1 types: (unsigned long) args: (unshare_flags)
++syscall sys_set_robust_list nr 311 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
++syscall sys_get_robust_list nr 312 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
++syscall sys_splice nr 313 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
++syscall sys_tee nr 315 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
++syscall sys_vmsplice nr 316 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
++syscall sys_getcpu nr 318 nbargs 3 types: (unsigned *, unsigned *, struct getcpu_cache *) args: (cpup, nodep, unused)
++syscall sys_epoll_pwait nr 319 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
++syscall sys_utimensat nr 320 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
++syscall sys_signalfd nr 321 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
++syscall sys_timerfd_create nr 322 nbargs 2 types: (int, int) args: (clockid, flags)
++syscall sys_eventfd nr 323 nbargs 1 types: (unsigned int) args: (count)
++syscall sys_timerfd_settime nr 325 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
++syscall sys_timerfd_gettime nr 326 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
++syscall sys_signalfd4 nr 327 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
++syscall sys_eventfd2 nr 328 nbargs 2 types: (unsigned int, int) args: (count, flags)
++syscall sys_epoll_create1 nr 329 nbargs 1 types: (int) args: (flags)
++syscall sys_dup3 nr 330 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
++syscall sys_pipe2 nr 331 nbargs 2 types: (int *, int) args: (fildes, flags)
++syscall sys_inotify_init1 nr 332 nbargs 1 types: (int) args: (flags)
++syscall sys_preadv nr 333 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
++syscall sys_pwritev nr 334 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
++syscall sys_rt_tgsigqueueinfo nr 335 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
++syscall sys_perf_event_open nr 336 nbargs 5 types: (struct perf_event_attr *, pid_t, int, int, unsigned long) args: (attr_uptr, pid, cpu, group_fd, flags)
++syscall sys_recvmmsg nr 337 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
++syscall sys_fanotify_init nr 338 nbargs 2 types: (unsigned int, unsigned int) args: (flags, event_f_flags)
++syscall sys_prlimit64 nr 340 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
++syscall sys_clock_adjtime nr 343 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
++syscall sys_syncfs nr 344 nbargs 1 types: (int) args: (fd)
++syscall sys_sendmmsg nr 345 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
++syscall sys_setns nr 346 nbargs 2 types: (int, int) args: (fd, nstype)
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/3.10.0-rc7/x86-64-syscalls-3.10.0-rc7
+@@ -0,0 +1,290 @@
++syscall sys_read nr 0 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
++syscall sys_write nr 1 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
++syscall sys_open nr 2 nbargs 3 types: (const char *, int, umode_t) args: (filename, flags, mode)
++syscall sys_close nr 3 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_newstat nr 4 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_newfstat nr 5 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
++syscall sys_newlstat nr 6 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_poll nr 7 nbargs 3 types: (struct pollfd *, unsigned int, int) args: (ufds, nfds, timeout_msecs)
++syscall sys_lseek nr 8 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, whence)
++syscall sys_mmap nr 9 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, len, prot, flags, fd, off)
++syscall sys_mprotect nr 10 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
++syscall sys_munmap nr 11 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
++syscall sys_brk nr 12 nbargs 1 types: (unsigned long) args: (brk)
++syscall sys_rt_sigaction nr 13 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
++syscall sys_rt_sigprocmask nr 14 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
++syscall sys_ioctl nr 16 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_pread64 nr 17 nbargs 4 types: (unsigned int, char *, size_t, loff_t) args: (fd, buf, count, pos)
++syscall sys_pwrite64 nr 18 nbargs 4 types: (unsigned int, const char *, size_t, loff_t) args: (fd, buf, count, pos)
++syscall sys_readv nr 19 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_writev nr 20 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_access nr 21 nbargs 2 types: (const char *, int) args: (filename, mode)
++syscall sys_pipe nr 22 nbargs 1 types: (int *) args: (fildes)
++syscall sys_select nr 23 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
++syscall sys_sched_yield nr 24 nbargs 0 types: () args: ()
++syscall sys_mremap nr 25 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
++syscall sys_msync nr 26 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
++syscall sys_mincore nr 27 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
++syscall sys_madvise nr 28 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
++syscall sys_shmget nr 29 nbargs 3 types: (key_t, size_t, int) args: (key, size, shmflg)
++syscall sys_shmat nr 30 nbargs 3 types: (int, char *, int) args: (shmid, shmaddr, shmflg)
++syscall sys_shmctl nr 31 nbargs 3 types: (int, int, struct shmid_ds *) args: (shmid, cmd, buf)
++syscall sys_dup nr 32 nbargs 1 types: (unsigned int) args: (fildes)
++syscall sys_dup2 nr 33 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
++syscall sys_pause nr 34 nbargs 0 types: () args: ()
++syscall sys_nanosleep nr 35 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
++syscall sys_getitimer nr 36 nbargs 2 types: (int, struct itimerval *) args: (which, value)
++syscall sys_alarm nr 37 nbargs 1 types: (unsigned int) args: (seconds)
++syscall sys_setitimer nr 38 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
++syscall sys_getpid nr 39 nbargs 0 types: () args: ()
++syscall sys_sendfile64 nr 40 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
++syscall sys_socket nr 41 nbargs 3 types: (int, int, int) args: (family, type, protocol)
++syscall sys_connect nr 42 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, uservaddr, addrlen)
++syscall sys_accept nr 43 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, upeer_sockaddr, upeer_addrlen)
++syscall sys_sendto nr 44 nbargs 6 types: (int, void *, size_t, unsigned int, struct sockaddr *, int) args: (fd, buff, len, flags, addr, addr_len)
++syscall sys_recvfrom nr 45 nbargs 6 types: (int, void *, size_t, unsigned int, struct sockaddr *, int *) args: (fd, ubuf, size, flags, addr, addr_len)
++syscall sys_sendmsg nr 46 nbargs 3 types: (int, struct msghdr *, unsigned int) args: (fd, msg, flags)
++syscall sys_recvmsg nr 47 nbargs 3 types: (int, struct msghdr *, unsigned int) args: (fd, msg, flags)
++syscall sys_shutdown nr 48 nbargs 2 types: (int, int) args: (fd, how)
++syscall sys_bind nr 49 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, umyaddr, addrlen)
++syscall sys_listen nr 50 nbargs 2 types: (int, int) args: (fd, backlog)
++syscall sys_getsockname nr 51 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
++syscall sys_getpeername nr 52 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
++syscall sys_socketpair nr 53 nbargs 4 types: (int, int, int, int *) args: (family, type, protocol, usockvec)
++syscall sys_setsockopt nr 54 nbargs 5 types: (int, int, int, char *, int) args: (fd, level, optname, optval, optlen)
++syscall sys_getsockopt nr 55 nbargs 5 types: (int, int, int, char *, int *) args: (fd, level, optname, optval, optlen)
++syscall sys_exit nr 60 nbargs 1 types: (int) args: (error_code)
++syscall sys_wait4 nr 61 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
++syscall sys_kill nr 62 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_newuname nr 63 nbargs 1 types: (struct new_utsname *) args: (name)
++syscall sys_semget nr 64 nbargs 3 types: (key_t, int, int) args: (key, nsems, semflg)
++syscall sys_semop nr 65 nbargs 3 types: (int, struct sembuf *, unsigned) args: (semid, tsops, nsops)
++syscall sys_semctl nr 66 nbargs 4 types: (int, int, int, unsigned long) args: (semid, semnum, cmd, arg)
++syscall sys_shmdt nr 67 nbargs 1 types: (char *) args: (shmaddr)
++syscall sys_msgget nr 68 nbargs 2 types: (key_t, int) args: (key, msgflg)
++syscall sys_msgsnd nr 69 nbargs 4 types: (int, struct msgbuf *, size_t, int) args: (msqid, msgp, msgsz, msgflg)
++syscall sys_msgrcv nr 70 nbargs 5 types: (int, struct msgbuf *, size_t, long, int) args: (msqid, msgp, msgsz, msgtyp, msgflg)
++syscall sys_msgctl nr 71 nbargs 3 types: (int, int, struct msqid_ds *) args: (msqid, cmd, buf)
++syscall sys_fcntl nr 72 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_flock nr 73 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
++syscall sys_fsync nr 74 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_fdatasync nr 75 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_truncate nr 76 nbargs 2 types: (const char *, long) args: (path, length)
++syscall sys_ftruncate nr 77 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
++syscall sys_getdents nr 78 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
++syscall sys_getcwd nr 79 nbargs 2 types: (char *, unsigned long) args: (buf, size)
++syscall sys_chdir nr 80 nbargs 1 types: (const char *) args: (filename)
++syscall sys_fchdir nr 81 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_rename nr 82 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_mkdir nr 83 nbargs 2 types: (const char *, umode_t) args: (pathname, mode)
++syscall sys_rmdir nr 84 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_creat nr 85 nbargs 2 types: (const char *, umode_t) args: (pathname, mode)
++syscall sys_link nr 86 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_unlink nr 87 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_symlink nr 88 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_readlink nr 89 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
++syscall sys_chmod nr 90 nbargs 2 types: (const char *, umode_t) args: (filename, mode)
++syscall sys_fchmod nr 91 nbargs 2 types: (unsigned int, umode_t) args: (fd, mode)
++syscall sys_chown nr 92 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_fchown nr 93 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
++syscall sys_lchown nr 94 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_umask nr 95 nbargs 1 types: (int) args: (mask)
++syscall sys_gettimeofday nr 96 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_getrlimit nr 97 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_getrusage nr 98 nbargs 2 types: (int, struct rusage *) args: (who, ru)
++syscall sys_sysinfo nr 99 nbargs 1 types: (struct sysinfo *) args: (info)
++syscall sys_times nr 100 nbargs 1 types: (struct tms *) args: (tbuf)
++syscall sys_ptrace nr 101 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
++syscall sys_getuid nr 102 nbargs 0 types: () args: ()
++syscall sys_syslog nr 103 nbargs 3 types: (int, char *, int) args: (type, buf, len)
++syscall sys_getgid nr 104 nbargs 0 types: () args: ()
++syscall sys_setuid nr 105 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_setgid nr 106 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_geteuid nr 107 nbargs 0 types: () args: ()
++syscall sys_getegid nr 108 nbargs 0 types: () args: ()
++syscall sys_setpgid nr 109 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
++syscall sys_getppid nr 110 nbargs 0 types: () args: ()
++syscall sys_getpgrp nr 111 nbargs 0 types: () args: ()
++syscall sys_setsid nr 112 nbargs 0 types: () args: ()
++syscall sys_setreuid nr 113 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
++syscall sys_setregid nr 114 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
++syscall sys_getgroups nr 115 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_setgroups nr 116 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_setresuid nr 117 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
++syscall sys_getresuid nr 118 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruidp, euidp, suidp)
++syscall sys_setresgid nr 119 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
++syscall sys_getresgid nr 120 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgidp, egidp, sgidp)
++syscall sys_getpgid nr 121 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_setfsuid nr 122 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_setfsgid nr 123 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_getsid nr 124 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_capget nr 125 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
++syscall sys_capset nr 126 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
++syscall sys_rt_sigpending nr 127 nbargs 2 types: (sigset_t *, size_t) args: (uset, sigsetsize)
++syscall sys_rt_sigtimedwait nr 128 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
++syscall sys_rt_sigqueueinfo nr 129 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
++syscall sys_rt_sigsuspend nr 130 nbargs 2 types: (sigset_t *, size_t) args: (unewset, sigsetsize)
++syscall sys_sigaltstack nr 131 nbargs 2 types: (const stack_t *, stack_t *) args: (uss, uoss)
++syscall sys_utime nr 132 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
++syscall sys_mknod nr 133 nbargs 3 types: (const char *, umode_t, unsigned) args: (filename, mode, dev)
++syscall sys_personality nr 135 nbargs 1 types: (unsigned int) args: (personality)
++syscall sys_ustat nr 136 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
++syscall sys_statfs nr 137 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
++syscall sys_fstatfs nr 138 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
++syscall sys_sysfs nr 139 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
++syscall sys_getpriority nr 140 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_setpriority nr 141 nbargs 3 types: (int, int, int) args: (which, who, niceval)
++syscall sys_sched_setparam nr 142 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_getparam nr 143 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_setscheduler nr 144 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
++syscall sys_sched_getscheduler nr 145 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_sched_get_priority_max nr 146 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_get_priority_min nr 147 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_rr_get_interval nr 148 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
++syscall sys_mlock nr 149 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_munlock nr 150 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_mlockall nr 151 nbargs 1 types: (int) args: (flags)
++syscall sys_munlockall nr 152 nbargs 0 types: () args: ()
++syscall sys_vhangup nr 153 nbargs 0 types: () args: ()
++syscall sys_pivot_root nr 155 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
++syscall sys_sysctl nr 156 nbargs 1 types: (struct __sysctl_args *) args: (args)
++syscall sys_prctl nr 157 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
++syscall sys_adjtimex nr 159 nbargs 1 types: (struct timex *) args: (txc_p)
++syscall sys_setrlimit nr 160 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_chroot nr 161 nbargs 1 types: (const char *) args: (filename)
++syscall sys_sync nr 162 nbargs 0 types: () args: ()
++syscall sys_acct nr 163 nbargs 1 types: (const char *) args: (name)
++syscall sys_settimeofday nr 164 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_mount nr 165 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
++syscall sys_umount nr 166 nbargs 2 types: (char *, int) args: (name, flags)
++syscall sys_swapon nr 167 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
++syscall sys_swapoff nr 168 nbargs 1 types: (const char *) args: (specialfile)
++syscall sys_reboot nr 169 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
++syscall sys_sethostname nr 170 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_setdomainname nr 171 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_init_module nr 175 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
++syscall sys_delete_module nr 176 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
++syscall sys_quotactl nr 179 nbargs 4 types: (unsigned int, const char *, qid_t, void *) args: (cmd, special, id, addr)
++syscall sys_gettid nr 186 nbargs 0 types: () args: ()
++syscall sys_readahead nr 187 nbargs 3 types: (int, loff_t, size_t) args: (fd, offset, count)
++syscall sys_setxattr nr 188 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
++syscall sys_lsetxattr nr 189 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
++syscall sys_fsetxattr nr 190 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
++syscall sys_getxattr nr 191 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
++syscall sys_lgetxattr nr 192 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
++syscall sys_fgetxattr nr 193 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
++syscall sys_listxattr nr 194 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
++syscall sys_llistxattr nr 195 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
++syscall sys_flistxattr nr 196 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
++syscall sys_removexattr nr 197 nbargs 2 types: (const char *, const char *) args: (pathname, name)
++syscall sys_lremovexattr nr 198 nbargs 2 types: (const char *, const char *) args: (pathname, name)
++syscall sys_fremovexattr nr 199 nbargs 2 types: (int, const char *) args: (fd, name)
++syscall sys_tkill nr 200 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_time nr 201 nbargs 1 types: (time_t *) args: (tloc)
++syscall sys_futex nr 202 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
++syscall sys_sched_setaffinity nr 203 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
++syscall sys_sched_getaffinity nr 204 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
++syscall sys_io_setup nr 206 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
++syscall sys_io_destroy nr 207 nbargs 1 types: (aio_context_t) args: (ctx)
++syscall sys_io_getevents nr 208 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
++syscall sys_io_submit nr 209 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
++syscall sys_io_cancel nr 210 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
++syscall sys_lookup_dcookie nr 212 nbargs 3 types: (u64, char *, size_t) args: (cookie64, buf, len)
++syscall sys_epoll_create nr 213 nbargs 1 types: (int) args: (size)
++syscall sys_remap_file_pages nr 216 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
++syscall sys_getdents64 nr 217 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
++syscall sys_set_tid_address nr 218 nbargs 1 types: (int *) args: (tidptr)
++syscall sys_restart_syscall nr 219 nbargs 0 types: () args: ()
++syscall sys_semtimedop nr 220 nbargs 4 types: (int, struct sembuf *, unsigned, const struct timespec *) args: (semid, tsops, nsops, timeout)
++syscall sys_fadvise64 nr 221 nbargs 4 types: (int, loff_t, size_t, int) args: (fd, offset, len, advice)
++syscall sys_timer_create nr 222 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
++syscall sys_timer_settime nr 223 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
++syscall sys_timer_gettime nr 224 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
++syscall sys_timer_getoverrun nr 225 nbargs 1 types: (timer_t) args: (timer_id)
++syscall sys_timer_delete nr 226 nbargs 1 types: (timer_t) args: (timer_id)
++syscall sys_clock_settime nr 227 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
++syscall sys_clock_gettime nr 228 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
++syscall sys_clock_getres nr 229 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
++syscall sys_clock_nanosleep nr 230 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
++syscall sys_exit_group nr 231 nbargs 1 types: (int) args: (error_code)
++syscall sys_epoll_wait nr 232 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
++syscall sys_epoll_ctl nr 233 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
++syscall sys_tgkill nr 234 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
++syscall sys_utimes nr 235 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
++syscall sys_mbind nr 237 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long *, unsigned long, unsigned) args: (start, len, mode, nmask, maxnode, flags)
++syscall sys_set_mempolicy nr 238 nbargs 3 types: (int, unsigned long *, unsigned long) args: (mode, nmask, maxnode)
++syscall sys_get_mempolicy nr 239 nbargs 5 types: (int *, unsigned long *, unsigned long, unsigned long, unsigned long) args: (policy, nmask, maxnode, addr, flags)
++syscall sys_mq_open nr 240 nbargs 4 types: (const char *, int, umode_t, struct mq_attr *) args: (u_name, oflag, mode, u_attr)
++syscall sys_mq_unlink nr 241 nbargs 1 types: (const char *) args: (u_name)
++syscall sys_mq_timedsend nr 242 nbargs 5 types: (mqd_t, const char *, size_t, unsigned int, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout)
++syscall sys_mq_timedreceive nr 243 nbargs 5 types: (mqd_t, char *, size_t, unsigned int *, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout)
++syscall sys_mq_notify nr 244 nbargs 2 types: (mqd_t, const struct sigevent *) args: (mqdes, u_notification)
++syscall sys_mq_getsetattr nr 245 nbargs 3 types: (mqd_t, const struct mq_attr *, struct mq_attr *) args: (mqdes, u_mqstat, u_omqstat)
++syscall sys_kexec_load nr 246 nbargs 4 types: (unsigned long, unsigned long, struct kexec_segment *, unsigned long) args: (entry, nr_segments, segments, flags)
++syscall sys_waitid nr 247 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
++syscall sys_add_key nr 248 nbargs 5 types: (const char *, const char *, const void *, size_t, key_serial_t) args: (_type, _description, _payload, plen, ringid)
++syscall sys_request_key nr 249 nbargs 4 types: (const char *, const char *, const char *, key_serial_t) args: (_type, _description, _callout_info, destringid)
++syscall sys_keyctl nr 250 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
++syscall sys_ioprio_set nr 251 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
++syscall sys_ioprio_get nr 252 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_inotify_init nr 253 nbargs 0 types: () args: ()
++syscall sys_inotify_add_watch nr 254 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
++syscall sys_inotify_rm_watch nr 255 nbargs 2 types: (int, __s32) args: (fd, wd)
++syscall sys_migrate_pages nr 256 nbargs 4 types: (pid_t, unsigned long, const unsigned long *, const unsigned long *) args: (pid, maxnode, old_nodes, new_nodes)
++syscall sys_openat nr 257 nbargs 4 types: (int, const char *, int, umode_t) args: (dfd, filename, flags, mode)
++syscall sys_mkdirat nr 258 nbargs 3 types: (int, const char *, umode_t) args: (dfd, pathname, mode)
++syscall sys_mknodat nr 259 nbargs 4 types: (int, const char *, umode_t, unsigned) args: (dfd, filename, mode, dev)
++syscall sys_fchownat nr 260 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
++syscall sys_futimesat nr 261 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
++syscall sys_newfstatat nr 262 nbargs 4 types: (int, const char *, struct stat *, int) args: (dfd, filename, statbuf, flag)
++syscall sys_unlinkat nr 263 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
++syscall sys_renameat nr 264 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
++syscall sys_linkat nr 265 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
++syscall sys_symlinkat nr 266 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
++syscall sys_readlinkat nr 267 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
++syscall sys_fchmodat nr 268 nbargs 3 types: (int, const char *, umode_t) args: (dfd, filename, mode)
++syscall sys_faccessat nr 269 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
++syscall sys_pselect6 nr 270 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
++syscall sys_ppoll nr 271 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
++syscall sys_unshare nr 272 nbargs 1 types: (unsigned long) args: (unshare_flags)
++syscall sys_set_robust_list nr 273 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
++syscall sys_get_robust_list nr 274 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
++syscall sys_splice nr 275 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
++syscall sys_tee nr 276 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
++syscall sys_sync_file_range nr 277 nbargs 4 types: (int, loff_t, loff_t, unsigned int) args: (fd, offset, nbytes, flags)
++syscall sys_vmsplice nr 278 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
++syscall sys_move_pages nr 279 nbargs 6 types: (pid_t, unsigned long, const void * *, const int *, int *, int) args: (pid, nr_pages, pages, nodes, status, flags)
++syscall sys_utimensat nr 280 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
++syscall sys_epoll_pwait nr 281 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
++syscall sys_signalfd nr 282 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
++syscall sys_timerfd_create nr 283 nbargs 2 types: (int, int) args: (clockid, flags)
++syscall sys_eventfd nr 284 nbargs 1 types: (unsigned int) args: (count)
++syscall sys_fallocate nr 285 nbargs 4 types: (int, int, loff_t, loff_t) args: (fd, mode, offset, len)
++syscall sys_timerfd_settime nr 286 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
++syscall sys_timerfd_gettime nr 287 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
++syscall sys_accept4 nr 288 nbargs 4 types: (int, struct sockaddr *, int *, int) args: (fd, upeer_sockaddr, upeer_addrlen, flags)
++syscall sys_signalfd4 nr 289 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
++syscall sys_eventfd2 nr 290 nbargs 2 types: (unsigned int, int) args: (count, flags)
++syscall sys_epoll_create1 nr 291 nbargs 1 types: (int) args: (flags)
++syscall sys_dup3 nr 292 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
++syscall sys_pipe2 nr 293 nbargs 2 types: (int *, int) args: (fildes, flags)
++syscall sys_inotify_init1 nr 294 nbargs 1 types: (int) args: (flags)
++syscall sys_preadv nr 295 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
++syscall sys_pwritev nr 296 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
++syscall sys_rt_tgsigqueueinfo nr 297 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
++syscall sys_perf_event_open nr 298 nbargs 5 types: (struct perf_event_attr *, pid_t, int, int, unsigned long) args: (attr_uptr, pid, cpu, group_fd, flags)
++syscall sys_recvmmsg nr 299 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
++syscall sys_fanotify_init nr 300 nbargs 2 types: (unsigned int, unsigned int) args: (flags, event_f_flags)
++syscall sys_fanotify_mark nr 301 nbargs 5 types: (int, unsigned int, __u64, int, const char *) args: (fanotify_fd, flags, mask, dfd, pathname)
++syscall sys_prlimit64 nr 302 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
++syscall sys_name_to_handle_at nr 303 nbargs 5 types: (int, const char *, struct file_handle *, int *, int) args: (dfd, name, handle, mnt_id, flag)
++syscall sys_open_by_handle_at nr 304 nbargs 3 types: (int, struct file_handle *, int) args: (mountdirfd, handle, flags)
++syscall sys_clock_adjtime nr 305 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
++syscall sys_syncfs nr 306 nbargs 1 types: (int) args: (fd)
++syscall sys_sendmmsg nr 307 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
++syscall sys_setns nr 308 nbargs 2 types: (int, int) args: (fd, nstype)
++syscall sys_getcpu nr 309 nbargs 3 types: (unsigned *, unsigned *, struct getcpu_cache *) args: (cpup, nodep, unused)
++syscall sys_process_vm_readv nr 310 nbargs 6 types: (pid_t, const struct iovec *, unsigned long, const struct iovec *, unsigned long, unsigned long) args: (pid, lvec, liovcnt, rvec, riovcnt, flags)
++syscall sys_process_vm_writev nr 311 nbargs 6 types: (pid_t, const struct iovec *, unsigned long, const struct iovec *, unsigned long, unsigned long) args: (pid, lvec, liovcnt, rvec, riovcnt, flags)
++syscall sys_finit_module nr 313 nbargs 3 types: (int, const char *, int) args: (fd, uargs, flags)
++SUCCESS
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/3.4.25/arm-32-syscalls-3.4.25
+@@ -0,0 +1,299 @@
++syscall sys_restart_syscall nr 0 nbargs 0 types: () args: ()
++syscall sys_exit nr 1 nbargs 1 types: (int) args: (error_code)
++syscall sys_read nr 3 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
++syscall sys_write nr 4 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
++syscall sys_open nr 5 nbargs 3 types: (const char *, int, umode_t) args: (filename, flags, mode)
++syscall sys_close nr 6 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_creat nr 8 nbargs 2 types: (const char *, umode_t) args: (pathname, mode)
++syscall sys_link nr 9 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_unlink nr 10 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_chdir nr 12 nbargs 1 types: (const char *) args: (filename)
++syscall sys_mknod nr 14 nbargs 3 types: (const char *, umode_t, unsigned) args: (filename, mode, dev)
++syscall sys_chmod nr 15 nbargs 2 types: (const char *, umode_t) args: (filename, mode)
++syscall sys_lchown16 nr 16 nbargs 3 types: (const char *, old_uid_t, old_gid_t) args: (filename, user, group)
++syscall sys_lseek nr 19 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
++syscall sys_getpid nr 20 nbargs 0 types: () args: ()
++syscall sys_mount nr 21 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
++syscall sys_setuid16 nr 23 nbargs 1 types: (old_uid_t) args: (uid)
++syscall sys_getuid16 nr 24 nbargs 0 types: () args: ()
++syscall sys_ptrace nr 26 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
++syscall sys_pause nr 29 nbargs 0 types: () args: ()
++syscall sys_access nr 33 nbargs 2 types: (const char *, int) args: (filename, mode)
++syscall sys_nice nr 34 nbargs 1 types: (int) args: (increment)
++syscall sys_sync nr 36 nbargs 0 types: () args: ()
++syscall sys_kill nr 37 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_rename nr 38 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_mkdir nr 39 nbargs 2 types: (const char *, umode_t) args: (pathname, mode)
++syscall sys_rmdir nr 40 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_dup nr 41 nbargs 1 types: (unsigned int) args: (fildes)
++syscall sys_pipe nr 42 nbargs 1 types: (int *) args: (fildes)
++syscall sys_times nr 43 nbargs 1 types: (struct tms *) args: (tbuf)
++syscall sys_brk nr 45 nbargs 1 types: (unsigned long) args: (brk)
++syscall sys_setgid16 nr 46 nbargs 1 types: (old_gid_t) args: (gid)
++syscall sys_getgid16 nr 47 nbargs 0 types: () args: ()
++syscall sys_geteuid16 nr 49 nbargs 0 types: () args: ()
++syscall sys_getegid16 nr 50 nbargs 0 types: () args: ()
++syscall sys_acct nr 51 nbargs 1 types: (const char *) args: (name)
++syscall sys_umount nr 52 nbargs 2 types: (char *, int) args: (name, flags)
++syscall sys_ioctl nr 54 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_fcntl nr 55 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_setpgid nr 57 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
++syscall sys_umask nr 60 nbargs 1 types: (int) args: (mask)
++syscall sys_chroot nr 61 nbargs 1 types: (const char *) args: (filename)
++syscall sys_ustat nr 62 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
++syscall sys_dup2 nr 63 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
++syscall sys_getppid nr 64 nbargs 0 types: () args: ()
++syscall sys_getpgrp nr 65 nbargs 0 types: () args: ()
++syscall sys_setsid nr 66 nbargs 0 types: () args: ()
++syscall sys_setreuid16 nr 70 nbargs 2 types: (old_uid_t, old_uid_t) args: (ruid, euid)
++syscall sys_setregid16 nr 71 nbargs 2 types: (old_gid_t, old_gid_t) args: (rgid, egid)
++syscall sys_sigpending nr 73 nbargs 1 types: (old_sigset_t *) args: (set)
++syscall sys_sethostname nr 74 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_setrlimit nr 75 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_getrusage nr 77 nbargs 2 types: (int, struct rusage *) args: (who, ru)
++syscall sys_gettimeofday nr 78 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_settimeofday nr 79 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_getgroups16 nr 80 nbargs 2 types: (int, old_gid_t *) args: (gidsetsize, grouplist)
++syscall sys_setgroups16 nr 81 nbargs 2 types: (int, old_gid_t *) args: (gidsetsize, grouplist)
++syscall sys_symlink nr 83 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_readlink nr 85 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
++syscall sys_uselib nr 86 nbargs 1 types: (const char *) args: (library)
++syscall sys_swapon nr 87 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
++syscall sys_reboot nr 88 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
++syscall sys_munmap nr 91 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
++syscall sys_truncate nr 92 nbargs 2 types: (const char *, long) args: (path, length)
++syscall sys_ftruncate nr 93 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
++syscall sys_fchmod nr 94 nbargs 2 types: (unsigned int, umode_t) args: (fd, mode)
++syscall sys_fchown16 nr 95 nbargs 3 types: (unsigned int, old_uid_t, old_gid_t) args: (fd, user, group)
++syscall sys_getpriority nr 96 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_setpriority nr 97 nbargs 3 types: (int, int, int) args: (which, who, niceval)
++syscall sys_statfs nr 99 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
++syscall sys_fstatfs nr 100 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
++syscall sys_syslog nr 103 nbargs 3 types: (int, char *, int) args: (type, buf, len)
++syscall sys_setitimer nr 104 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
++syscall sys_getitimer nr 105 nbargs 2 types: (int, struct itimerval *) args: (which, value)
++syscall sys_newstat nr 106 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_newlstat nr 107 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_newfstat nr 108 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
++syscall sys_vhangup nr 111 nbargs 0 types: () args: ()
++syscall sys_wait4 nr 114 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
++syscall sys_swapoff nr 115 nbargs 1 types: (const char *) args: (specialfile)
++syscall sys_sysinfo nr 116 nbargs 1 types: (struct sysinfo *) args: (info)
++syscall sys_fsync nr 118 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_setdomainname nr 121 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_newuname nr 122 nbargs 1 types: (struct new_utsname *) args: (name)
++syscall sys_adjtimex nr 124 nbargs 1 types: (struct timex *) args: (txc_p)
++syscall sys_mprotect nr 125 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
++syscall sys_sigprocmask nr 126 nbargs 3 types: (int, old_sigset_t *, old_sigset_t *) args: (how, nset, oset)
++syscall sys_init_module nr 128 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
++syscall sys_delete_module nr 129 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
++syscall sys_quotactl nr 131 nbargs 4 types: (unsigned int, const char *, qid_t, void *) args: (cmd, special, id, addr)
++syscall sys_getpgid nr 132 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_fchdir nr 133 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_bdflush nr 134 nbargs 2 types: (int, long) args: (func, data)
++syscall sys_sysfs nr 135 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
++syscall sys_personality nr 136 nbargs 1 types: (unsigned int) args: (personality)
++syscall sys_setfsuid16 nr 138 nbargs 1 types: (old_uid_t) args: (uid)
++syscall sys_setfsgid16 nr 139 nbargs 1 types: (old_gid_t) args: (gid)
++syscall sys_llseek nr 140 nbargs 5 types: (unsigned int, unsigned long, unsigned long, loff_t *, unsigned int) args: (fd, offset_high, offset_low, result, origin)
++syscall sys_getdents nr 141 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
++syscall sys_select nr 142 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
++syscall sys_flock nr 143 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
++syscall sys_msync nr 144 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
++syscall sys_readv nr 145 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_writev nr 146 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_getsid nr 147 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_fdatasync nr 148 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_sysctl nr 149 nbargs 1 types: (struct __sysctl_args *) args: (args)
++syscall sys_mlock nr 150 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_munlock nr 151 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_mlockall nr 152 nbargs 1 types: (int) args: (flags)
++syscall sys_munlockall nr 153 nbargs 0 types: () args: ()
++syscall sys_sched_setparam nr 154 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_getparam nr 155 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_setscheduler nr 156 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
++syscall sys_sched_getscheduler nr 157 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_sched_yield nr 158 nbargs 0 types: () args: ()
++syscall sys_sched_get_priority_max nr 159 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_get_priority_min nr 160 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_rr_get_interval nr 161 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
++syscall sys_nanosleep nr 162 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
++syscall sys_mremap nr 163 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
++syscall sys_setresuid16 nr 164 nbargs 3 types: (old_uid_t, old_uid_t, old_uid_t) args: (ruid, euid, suid)
++syscall sys_getresuid16 nr 165 nbargs 3 types: (old_uid_t *, old_uid_t *, old_uid_t *) args: (ruid, euid, suid)
++syscall sys_poll nr 168 nbargs 3 types: (struct pollfd *, unsigned int, int) args: (ufds, nfds, timeout_msecs)
++syscall sys_setresgid16 nr 170 nbargs 3 types: (old_gid_t, old_gid_t, old_gid_t) args: (rgid, egid, sgid)
++syscall sys_getresgid16 nr 171 nbargs 3 types: (old_gid_t *, old_gid_t *, old_gid_t *) args: (rgid, egid, sgid)
++syscall sys_prctl nr 172 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
++syscall sys_rt_sigaction nr 174 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
++syscall sys_rt_sigprocmask nr 175 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
++syscall sys_rt_sigpending nr 176 nbargs 2 types: (sigset_t *, size_t) args: (set, sigsetsize)
++syscall sys_rt_sigtimedwait nr 177 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
++syscall sys_rt_sigqueueinfo nr 178 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
++syscall sys_rt_sigsuspend nr 179 nbargs 2 types: (sigset_t *, size_t) args: (unewset, sigsetsize)
++syscall sys_chown16 nr 182 nbargs 3 types: (const char *, old_uid_t, old_gid_t) args: (filename, user, group)
++syscall sys_getcwd nr 183 nbargs 2 types: (char *, unsigned long) args: (buf, size)
++syscall sys_capget nr 184 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
++syscall sys_capset nr 185 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
++syscall sys_sendfile nr 187 nbargs 4 types: (int, int, off_t *, size_t) args: (out_fd, in_fd, offset, count)
++syscall sys_getrlimit nr 191 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_stat64 nr 195 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
++syscall sys_lstat64 nr 196 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
++syscall sys_fstat64 nr 197 nbargs 2 types: (unsigned long, struct stat64 *) args: (fd, statbuf)
++syscall sys_lchown nr 198 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_getuid nr 199 nbargs 0 types: () args: ()
++syscall sys_getgid nr 200 nbargs 0 types: () args: ()
++syscall sys_geteuid nr 201 nbargs 0 types: () args: ()
++syscall sys_getegid nr 202 nbargs 0 types: () args: ()
++syscall sys_setreuid nr 203 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
++syscall sys_setregid nr 204 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
++syscall sys_getgroups nr 205 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_setgroups nr 206 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_fchown nr 207 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
++syscall sys_setresuid nr 208 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
++syscall sys_getresuid nr 209 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruid, euid, suid)
++syscall sys_setresgid nr 210 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
++syscall sys_getresgid nr 211 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgid, egid, sgid)
++syscall sys_chown nr 212 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_setuid nr 213 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_setgid nr 214 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_setfsuid nr 215 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_setfsgid nr 216 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_getdents64 nr 217 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
++syscall sys_pivot_root nr 218 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
++syscall sys_mincore nr 219 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
++syscall sys_madvise nr 220 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
++syscall sys_fcntl64 nr 221 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_gettid nr 224 nbargs 0 types: () args: ()
++syscall sys_setxattr nr 226 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
++syscall sys_lsetxattr nr 227 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
++syscall sys_fsetxattr nr 228 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
++syscall sys_getxattr nr 229 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
++syscall sys_lgetxattr nr 230 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
++syscall sys_fgetxattr nr 231 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
++syscall sys_listxattr nr 232 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
++syscall sys_llistxattr nr 233 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
++syscall sys_flistxattr nr 234 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
++syscall sys_removexattr nr 235 nbargs 2 types: (const char *, const char *) args: (pathname, name)
++syscall sys_lremovexattr nr 236 nbargs 2 types: (const char *, const char *) args: (pathname, name)
++syscall sys_fremovexattr nr 237 nbargs 2 types: (int, const char *) args: (fd, name)
++syscall sys_tkill nr 238 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_sendfile64 nr 239 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
++syscall sys_futex nr 240 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
++syscall sys_sched_setaffinity nr 241 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
++syscall sys_sched_getaffinity nr 242 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
++syscall sys_io_setup nr 243 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
++syscall sys_io_destroy nr 244 nbargs 1 types: (aio_context_t) args: (ctx)
++syscall sys_io_getevents nr 245 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
++syscall sys_io_submit nr 246 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
++syscall sys_io_cancel nr 247 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
++syscall sys_exit_group nr 248 nbargs 1 types: (int) args: (error_code)
++syscall sys_epoll_create nr 250 nbargs 1 types: (int) args: (size)
++syscall sys_epoll_ctl nr 251 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
++syscall sys_epoll_wait nr 252 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
++syscall sys_remap_file_pages nr 253 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
++syscall sys_set_tid_address nr 256 nbargs 1 types: (int *) args: (tidptr)
++syscall sys_timer_create nr 257 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
++syscall sys_timer_settime nr 258 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
++syscall sys_timer_gettime nr 259 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
++syscall sys_timer_getoverrun nr 260 nbargs 1 types: (timer_t) args: (timer_id)
++syscall sys_timer_delete nr 261 nbargs 1 types: (timer_t) args: (timer_id)
++syscall sys_clock_settime nr 262 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
++syscall sys_clock_gettime nr 263 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
++syscall sys_clock_getres nr 264 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
++syscall sys_clock_nanosleep nr 265 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
++syscall sys_tgkill nr 268 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
++syscall sys_utimes nr 269 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
++syscall sys_mq_open nr 274 nbargs 4 types: (const char *, int, umode_t, struct mq_attr *) args: (u_name, oflag, mode, u_attr)
++syscall sys_mq_unlink nr 275 nbargs 1 types: (const char *) args: (u_name)
++syscall sys_mq_timedsend nr 276 nbargs 5 types: (mqd_t, const char *, size_t, unsigned int, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout)
++syscall sys_mq_timedreceive nr 277 nbargs 5 types: (mqd_t, char *, size_t, unsigned int *, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout)
++syscall sys_mq_notify nr 278 nbargs 2 types: (mqd_t, const struct sigevent *) args: (mqdes, u_notification)
++syscall sys_mq_getsetattr nr 279 nbargs 3 types: (mqd_t, const struct mq_attr *, struct mq_attr *) args: (mqdes, u_mqstat, u_omqstat)
++syscall sys_waitid nr 280 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
++syscall sys_socket nr 281 nbargs 3 types: (int, int, int) args: (family, type, protocol)
++syscall sys_bind nr 282 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, umyaddr, addrlen)
++syscall sys_connect nr 283 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, uservaddr, addrlen)
++syscall sys_listen nr 284 nbargs 2 types: (int, int) args: (fd, backlog)
++syscall sys_accept nr 285 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, upeer_sockaddr, upeer_addrlen)
++syscall sys_getsockname nr 286 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
++syscall sys_getpeername nr 287 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
++syscall sys_socketpair nr 288 nbargs 4 types: (int, int, int, int *) args: (family, type, protocol, usockvec)
++syscall sys_send nr 289 nbargs 4 types: (int, void *, size_t, unsigned) args: (fd, buff, len, flags)
++syscall sys_sendto nr 290 nbargs 6 types: (int, void *, size_t, unsigned, struct sockaddr *, int) args: (fd, buff, len, flags, addr, addr_len)
++syscall sys_recvfrom nr 292 nbargs 6 types: (int, void *, size_t, unsigned, struct sockaddr *, int *) args: (fd, ubuf, size, flags, addr, addr_len)
++syscall sys_shutdown nr 293 nbargs 2 types: (int, int) args: (fd, how)
++syscall sys_setsockopt nr 294 nbargs 5 types: (int, int, int, char *, int) args: (fd, level, optname, optval, optlen)
++syscall sys_getsockopt nr 295 nbargs 5 types: (int, int, int, char *, int *) args: (fd, level, optname, optval, optlen)
++syscall sys_sendmsg nr 296 nbargs 3 types: (int, struct msghdr *, unsigned) args: (fd, msg, flags)
++syscall sys_recvmsg nr 297 nbargs 3 types: (int, struct msghdr *, unsigned int) args: (fd, msg, flags)
++syscall sys_semop nr 298 nbargs 3 types: (int, struct sembuf *, unsigned) args: (semid, tsops, nsops)
++syscall sys_semget nr 299 nbargs 3 types: (key_t, int, int) args: (key, nsems, semflg)
++syscall sys_msgsnd nr 301 nbargs 4 types: (int, struct msgbuf *, size_t, int) args: (msqid, msgp, msgsz, msgflg)
++syscall sys_msgrcv nr 302 nbargs 5 types: (int, struct msgbuf *, size_t, long, int) args: (msqid, msgp, msgsz, msgtyp, msgflg)
++syscall sys_msgget nr 303 nbargs 2 types: (key_t, int) args: (key, msgflg)
++syscall sys_msgctl nr 304 nbargs 3 types: (int, int, struct msqid_ds *) args: (msqid, cmd, buf)
++syscall sys_shmat nr 305 nbargs 3 types: (int, char *, int) args: (shmid, shmaddr, shmflg)
++syscall sys_shmdt nr 306 nbargs 1 types: (char *) args: (shmaddr)
++syscall sys_shmget nr 307 nbargs 3 types: (key_t, size_t, int) args: (key, size, shmflg)
++syscall sys_shmctl nr 308 nbargs 3 types: (int, int, struct shmid_ds *) args: (shmid, cmd, buf)
++syscall sys_add_key nr 309 nbargs 5 types: (const char *, const char *, const void *, size_t, key_serial_t) args: (_type, _description, _payload, plen, ringid)
++syscall sys_request_key nr 310 nbargs 4 types: (const char *, const char *, const char *, key_serial_t) args: (_type, _description, _callout_info, destringid)
++syscall sys_keyctl nr 311 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
++syscall sys_semtimedop nr 312 nbargs 4 types: (int, struct sembuf *, unsigned, const struct timespec *) args: (semid, tsops, nsops, timeout)
++syscall sys_ioprio_set nr 314 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
++syscall sys_ioprio_get nr 315 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_inotify_init nr 316 nbargs 0 types: () args: ()
++syscall sys_inotify_add_watch nr 317 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
++syscall sys_inotify_rm_watch nr 318 nbargs 2 types: (int, __s32) args: (fd, wd)
++syscall sys_openat nr 322 nbargs 4 types: (int, const char *, int, umode_t) args: (dfd, filename, flags, mode)
++syscall sys_mkdirat nr 323 nbargs 3 types: (int, const char *, umode_t) args: (dfd, pathname, mode)
++syscall sys_mknodat nr 324 nbargs 4 types: (int, const char *, umode_t, unsigned) args: (dfd, filename, mode, dev)
++syscall sys_fchownat nr 325 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
++syscall sys_futimesat nr 326 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
++syscall sys_fstatat64 nr 327 nbargs 4 types: (int, const char *, struct stat64 *, int) args: (dfd, filename, statbuf, flag)
++syscall sys_unlinkat nr 328 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
++syscall sys_renameat nr 329 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
++syscall sys_linkat nr 330 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
++syscall sys_symlinkat nr 331 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
++syscall sys_readlinkat nr 332 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
++syscall sys_fchmodat nr 333 nbargs 3 types: (int, const char *, umode_t) args: (dfd, filename, mode)
++syscall sys_faccessat nr 334 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
++syscall sys_pselect6 nr 335 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
++syscall sys_ppoll nr 336 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
++syscall sys_unshare nr 337 nbargs 1 types: (unsigned long) args: (unshare_flags)
++syscall sys_set_robust_list nr 338 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
++syscall sys_get_robust_list nr 339 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
++syscall sys_splice nr 340 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
++syscall sys_tee nr 342 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
++syscall sys_vmsplice nr 343 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
++syscall sys_getcpu nr 345 nbargs 3 types: (unsigned *, unsigned *, struct getcpu_cache *) args: (cpup, nodep, unused)
++syscall sys_epoll_pwait nr 346 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
++syscall sys_utimensat nr 348 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
++syscall sys_signalfd nr 349 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
++syscall sys_timerfd_create nr 350 nbargs 2 types: (int, int) args: (clockid, flags)
++syscall sys_eventfd nr 351 nbargs 1 types: (unsigned int) args: (count)
++syscall sys_timerfd_settime nr 353 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
++syscall sys_timerfd_gettime nr 354 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
++syscall sys_signalfd4 nr 355 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
++syscall sys_eventfd2 nr 356 nbargs 2 types: (unsigned int, int) args: (count, flags)
++syscall sys_epoll_create1 nr 357 nbargs 1 types: (int) args: (flags)
++syscall sys_dup3 nr 358 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
++syscall sys_pipe2 nr 359 nbargs 2 types: (int *, int) args: (fildes, flags)
++syscall sys_inotify_init1 nr 360 nbargs 1 types: (int) args: (flags)
++syscall sys_preadv nr 361 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
++syscall sys_pwritev nr 362 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
++syscall sys_rt_tgsigqueueinfo nr 363 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
++syscall sys_perf_event_open nr 364 nbargs 5 types: (struct perf_event_attr *, pid_t, int, int, unsigned long) args: (attr_uptr, pid, cpu, group_fd, flags)
++syscall sys_recvmmsg nr 365 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
++syscall sys_accept4 nr 366 nbargs 4 types: (int, struct sockaddr *, int *, int) args: (fd, upeer_sockaddr, upeer_addrlen, flags)
++syscall sys_fanotify_init nr 367 nbargs 2 types: (unsigned int, unsigned int) args: (flags, event_f_flags)
++syscall sys_prlimit64 nr 369 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
++syscall sys_name_to_handle_at nr 370 nbargs 5 types: (int, const char *, struct file_handle *, int *, int) args: (dfd, name, handle, mnt_id, flag)
++syscall sys_open_by_handle_at nr 371 nbargs 3 types: (int, struct file_handle *, int) args: (mountdirfd, handle, flags)
++syscall sys_clock_adjtime nr 372 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
++syscall sys_syncfs nr 373 nbargs 1 types: (int) args: (fd)
++syscall sys_sendmmsg nr 374 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
++syscall sys_setns nr 375 nbargs 2 types: (int, int) args: (fd, nstype)
++syscall sys_process_vm_readv nr 376 nbargs 6 types: (pid_t, const struct iovec *, unsigned long, const struct iovec *, unsigned long, unsigned long) args: (pid, lvec, liovcnt, rvec, riovcnt, flags)
++syscall sys_process_vm_writev nr 377 nbargs 6 types: (pid_t, const struct iovec *, unsigned long, const struct iovec *, unsigned long, unsigned long) args: (pid, lvec, liovcnt, rvec, riovcnt, flags)
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/3.5.0/mips-32-syscalls-3.5.0
+@@ -0,0 +1,141 @@
++syscall sys_exit nr 4003 nbargs 1 types: (int) args: (error_code)
++syscall sys_read nr 4007 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
++syscall sys_write nr 4009 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
++syscall sys_open nr 4011 nbargs 3 types: (const char *, int, umode_t) args: (filename, flags, mode)
++syscall sys_close nr 4013 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_waitpid nr 4015 nbargs 3 types: (pid_t, int *, int) args: (pid, stat_addr, options)
++syscall sys_creat nr 4017 nbargs 2 types: (const char *, umode_t) args: (pathname, mode)
++syscall sys_link nr 4019 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_unlink nr 4021 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_chdir nr 4025 nbargs 1 types: (const char *) args: (filename)
++syscall sys_time nr 4027 nbargs 1 types: (time_t *) args: (tloc)
++syscall sys_mknod nr 4029 nbargs 3 types: (const char *, umode_t, unsigned) args: (filename, mode, dev)
++syscall sys_chmod nr 4031 nbargs 2 types: (const char *, umode_t) args: (filename, mode)
++syscall sys_lchown nr 4033 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_lseek nr 4039 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
++syscall sys_getpid nr 4041 nbargs 0 types: () args: ()
++syscall sys_mount nr 4043 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
++syscall sys_oldumount nr 4045 nbargs 1 types: (char *) args: (name)
++syscall sys_setuid nr 4047 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_getuid nr 4049 nbargs 0 types: () args: ()
++syscall sys_stime nr 4051 nbargs 1 types: (time_t *) args: (tptr)
++syscall sys_ptrace nr 4053 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
++syscall sys_alarm nr 4055 nbargs 1 types: (unsigned int) args: (seconds)
++syscall sys_pause nr 4059 nbargs 0 types: () args: ()
++syscall sys_utime nr 4061 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
++syscall sys_access nr 4067 nbargs 2 types: (const char *, int) args: (filename, mode)
++syscall sys_nice nr 4069 nbargs 1 types: (int) args: (increment)
++syscall sys_sync nr 4073 nbargs 0 types: () args: ()
++syscall sys_kill nr 4075 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_rename nr 4077 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_mkdir nr 4079 nbargs 2 types: (const char *, umode_t) args: (pathname, mode)
++syscall sys_rmdir nr 4081 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_dup nr 4083 nbargs 1 types: (unsigned int) args: (fildes)
++syscall sys_times nr 4087 nbargs 1 types: (struct tms *) args: (tbuf)
++syscall sys_brk nr 4091 nbargs 1 types: (unsigned long) args: (brk)
++syscall sys_setgid nr 4093 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_getgid nr 4095 nbargs 0 types: () args: ()
++syscall sys_geteuid nr 4099 nbargs 0 types: () args: ()
++syscall sys_getegid nr 4101 nbargs 0 types: () args: ()
++syscall sys_umount nr 4105 nbargs 2 types: (char *, int) args: (name, flags)
++syscall sys_ioctl nr 4109 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_fcntl nr 4111 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_setpgid nr 4115 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
++syscall sys_olduname nr 4119 nbargs 1 types: (struct oldold_utsname *) args: (name)
++syscall sys_umask nr 4121 nbargs 1 types: (int) args: (mask)
++syscall sys_chroot nr 4123 nbargs 1 types: (const char *) args: (filename)
++syscall sys_ustat nr 4125 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
++syscall sys_dup2 nr 4127 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
++syscall sys_getppid nr 4129 nbargs 0 types: () args: ()
++syscall sys_getpgrp nr 4131 nbargs 0 types: () args: ()
++syscall sys_setsid nr 4133 nbargs 0 types: () args: ()
++syscall sys_sigaction nr 4135 nbargs 3 types: (int, const struct sigaction *, struct sigaction *) args: (sig, act, oact)
++syscall sys_sgetmask nr 4137 nbargs 0 types: () args: ()
++syscall sys_ssetmask nr 4139 nbargs 1 types: (int) args: (newmask)
++syscall sys_setreuid nr 4141 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
++syscall sys_setregid nr 4143 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
++syscall sys_sigpending nr 4147 nbargs 1 types: (old_sigset_t *) args: (set)
++syscall sys_sethostname nr 4149 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_setrlimit nr 4151 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_getrlimit nr 4153 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_getrusage nr 4155 nbargs 2 types: (int, struct rusage *) args: (who, ru)
++syscall sys_gettimeofday nr 4157 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_settimeofday nr 4159 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_getgroups nr 4161 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_setgroups nr 4163 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_symlink nr 4167 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_readlink nr 4171 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
++syscall sys_uselib nr 4173 nbargs 1 types: (const char *) args: (library)
++syscall sys_swapon nr 4175 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
++syscall sys_reboot nr 4177 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
++syscall sys_old_readdir nr 4179 nbargs 3 types: (unsigned int, struct old_linux_dirent *, unsigned int) args: (fd, dirent, count)
++syscall sys_mips_mmap nr 4181 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, off_t) args: (addr, len, prot, flags, fd, offset)
++syscall sys_munmap nr 4183 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
++syscall sys_truncate nr 4185 nbargs 2 types: (const char *, long) args: (path, length)
++syscall sys_ftruncate nr 4187 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
++syscall sys_fchmod nr 4189 nbargs 2 types: (unsigned int, umode_t) args: (fd, mode)
++syscall sys_fchown nr 4191 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
++syscall sys_getpriority nr 4193 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_setpriority nr 4195 nbargs 3 types: (int, int, int) args: (which, who, niceval)
++syscall sys_statfs nr 4199 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
++syscall sys_fstatfs nr 4201 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
++syscall sys_socketcall nr 4205 nbargs 2 types: (int, unsigned long *) args: (call, args)
++syscall sys_syslog nr 4207 nbargs 3 types: (int, char *, int) args: (type, buf, len)
++syscall sys_setitimer nr 4209 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
++syscall sys_getitimer nr 4211 nbargs 2 types: (int, struct itimerval *) args: (which, value)
++syscall sys_newstat nr 4213 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_newlstat nr 4215 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_newfstat nr 4217 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
++syscall sys_uname nr 4219 nbargs 1 types: (struct old_utsname *) args: (name)
++syscall sys_vhangup nr 4223 nbargs 0 types: () args: ()
++syscall sys_wait4 nr 4229 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
++syscall sys_swapoff nr 4231 nbargs 1 types: (const char *) args: (specialfile)
++syscall sys_sysinfo nr 4233 nbargs 1 types: (struct sysinfo *) args: (info)
++syscall sys_ipc nr 4235 nbargs 6 types: (unsigned int, int, unsigned long, unsigned long, void *, long) args: (call, first, second, third, ptr, fifth)
++syscall sys_fsync nr 4237 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_setdomainname nr 4243 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_newuname nr 4245 nbargs 1 types: (struct new_utsname *) args: (name)
++syscall sys_adjtimex nr 4249 nbargs 1 types: (struct timex *) args: (txc_p)
++syscall sys_mprotect nr 4251 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
++syscall sys_sigprocmask nr 4253 nbargs 3 types: (int, old_sigset_t *, old_sigset_t *) args: (how, nset, oset)
++syscall sys_init_module nr 4257 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
++syscall sys_delete_module nr 4259 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
++syscall sys_quotactl nr 4263 nbargs 4 types: (unsigned int, const char *, qid_t, void *) args: (cmd, special, id, addr)
++syscall sys_getpgid nr 4265 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_fchdir nr 4267 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_bdflush nr 4269 nbargs 2 types: (int, long) args: (func, data)
++syscall sys_sysfs nr 4271 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
++syscall sys_personality nr 4273 nbargs 1 types: (unsigned int) args: (personality)
++syscall sys_setfsuid nr 4277 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_setfsgid nr 4279 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_llseek nr 4281 nbargs 5 types: (unsigned int, unsigned long, unsigned long, loff_t *, unsigned int) args: (fd, offset_high, offset_low, result, origin)
++syscall sys_getdents nr 4283 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
++syscall sys_select nr 4285 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
++syscall sys_flock nr 4287 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
++syscall sys_msync nr 4289 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
++syscall sys_readv nr 4291 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_writev nr 4293 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_cacheflush nr 4295 nbargs 3 types: (unsigned long, unsigned long, unsigned int) args: (addr, bytes, cache)
++syscall sys_cachectl nr 4297 nbargs 3 types: (char *, int, int) args: (addr, nbytes, op)
++syscall sys_getsid nr 4303 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_fdatasync nr 4305 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_sysctl nr 4307 nbargs 1 types: (struct __sysctl_args *) args: (args)
++syscall sys_mlock nr 4309 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_munlock nr 4311 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_mlockall nr 4313 nbargs 1 types: (int) args: (flags)
++syscall sys_munlockall nr 4315 nbargs 0 types: () args: ()
++syscall sys_sched_setparam nr 4317 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_getparam nr 4319 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_setscheduler nr 4321 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
++syscall sys_sched_getscheduler nr 4323 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_sched_yield nr 4325 nbargs 0 types: () args: ()
++syscall sys_sched_get_priority_max nr 4327 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_get_priority_min nr 4329 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_rr_get_interval nr 4331 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
++syscall sys_nanosleep nr 4333 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
++syscall sys_mremap nr 4335 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
++syscall sys_accept nr 4337 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, upeer_sockaddr, upeer_addrlen)
++syscall sys_bind nr 4339 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, umyaddr, addrlen)
++syscall sys_connect nr 4341 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, uservaddr, addrlen)
++syscall sys_getpeername nr 4343 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
++syscall sys_getsockname nr 4345 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/3.5.0/mips-64-syscalls-3.5.0
+@@ -0,0 +1,289 @@
++syscall sys_waitpid nr 4007 nbargs 3 types: (pid_t, int *, int) args: (pid, stat_addr, options)
++syscall sys_oldumount nr 4022 nbargs 1 types: (char *) args: (name)
++syscall sys_nice nr 4034 nbargs 1 types: (int) args: (increment)
++syscall sys_olduname nr 4059 nbargs 1 types: (struct oldold_utsname *) args: (name)
++syscall sys_32_sigaction nr 4067 nbargs 3 types: (long, const struct sigaction32 *, struct sigaction32 *) args: (sig, act, oact)
++syscall sys_sgetmask nr 4068 nbargs 0 types: () args: ()
++syscall sys_ssetmask nr 4069 nbargs 1 types: (int) args: (newmask)
++syscall sys_uselib nr 4086 nbargs 1 types: (const char *) args: (library)
++syscall sys_uname nr 4109 nbargs 1 types: (struct old_utsname *) args: (name)
++syscall sys_32_ipc nr 4117 nbargs 6 types: (u32, long, long, long, unsigned long, unsigned long) args: (call, first, second, third, ptr, fifth)
++syscall sys_bdflush nr 4134 nbargs 2 types: (int, long) args: (func, data)
++syscall sys_32_llseek nr 4140 nbargs 5 types: (unsigned int, unsigned int, unsigned int, loff_t *, unsigned int) args: (fd, offset_high, offset_low, result, origin)
++syscall sys_send nr 4178 nbargs 4 types: (int, void *, size_t, unsigned int) args: (fd, buff, len, flags)
++syscall sys_32_pread nr 4200 nbargs 6 types: (unsigned long, char *, size_t, unsigned long, unsigned long, unsigned long) args: (fd, buf, count, unused, a4, a5)
++syscall sys_32_pwrite nr 4201 nbargs 6 types: (unsigned int, const char *, size_t, u32, u64, u64) args: (fd, buf, count, unused, a4, a5)
++syscall sys_mips_mmap2 nr 4210 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, len, prot, flags, fd, pgoff)
++syscall sys_32_truncate64 nr 4211 nbargs 4 types: (const char *, unsigned long, unsigned long, unsigned long) args: (path, __dummy, a2, a3)
++syscall sys_32_ftruncate64 nr 4212 nbargs 4 types: (unsigned long, unsigned long, unsigned long, unsigned long) args: (fd, __dummy, a2, a3)
++syscall sys_32_waitid nr 4278 nbargs 5 types: (int, compat_pid_t, compat_siginfo_t *, int, struct compat_rusage *) args: (which, pid, uinfo, options, uru)
++syscall sys_32_fanotify_mark nr 4337 nbargs 6 types: (int, unsigned int, u64, u64, int, const char *) args: (fanotify_fd, flags, a3, a4, dfd, pathname)
++syscall sys_rt_sigaction nr 5013 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
++syscall sys_rt_sigprocmask nr 5014 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
++syscall sys_ioctl nr 5015 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_readv nr 5018 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_writev nr 5019 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
++syscall sys_select nr 5022 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
++syscall sys_shmctl nr 5030 nbargs 3 types: (int, int, struct shmid_ds *) args: (shmid, cmd, buf)
++syscall sys_nanosleep nr 5034 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
++syscall sys_getitimer nr 5035 nbargs 2 types: (int, struct itimerval *) args: (which, value)
++syscall sys_setitimer nr 5036 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
++syscall sys_recvfrom nr 5044 nbargs 6 types: (int, void *, size_t, unsigned int, struct sockaddr *, int *) args: (fd, ubuf, size, flags, addr, addr_len)
++syscall sys_sendmsg nr 5045 nbargs 3 types: (int, struct msghdr *, unsigned int) args: (fd, msg, flags)
++syscall sys_recvmsg nr 5046 nbargs 3 types: (int, struct msghdr *, unsigned int) args: (fd, msg, flags)
++syscall sys_setsockopt nr 5053 nbargs 5 types: (int, int, int, char *, int) args: (fd, level, optname, optval, optlen)
++syscall sys_wait4 nr 5059 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
++syscall sys_msgsnd nr 5067 nbargs 4 types: (int, struct msgbuf *, size_t, int) args: (msqid, msgp, msgsz, msgflg)
++syscall sys_msgrcv nr 5068 nbargs 5 types: (int, struct msgbuf *, size_t, long, int) args: (msqid, msgp, msgsz, msgtyp, msgflg)
++syscall sys_msgctl nr 5069 nbargs 3 types: (int, int, struct msqid_ds *) args: (msqid, cmd, buf)
++syscall sys_fcntl nr 5070 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
++syscall sys_getdents nr 5076 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
++syscall sys_gettimeofday nr 5094 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_getrlimit nr 5095 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_getrusage nr 5096 nbargs 2 types: (int, struct rusage *) args: (who, ru)
++syscall sys_sysinfo nr 5097 nbargs 1 types: (struct sysinfo *) args: (info)
++syscall sys_times nr 5098 nbargs 1 types: (struct tms *) args: (tbuf)
++syscall sys_ptrace nr 5099 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
++syscall sys_rt_sigpending nr 5125 nbargs 2 types: (sigset_t *, size_t) args: (set, sigsetsize)
++syscall sys_rt_sigtimedwait nr 5126 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
++syscall sys_rt_sigqueueinfo nr 5127 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
++syscall sys_utime nr 5130 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
++syscall sys_personality nr 5132 nbargs 1 types: (unsigned int) args: (personality)
++syscall sys_ustat nr 5133 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
++syscall sys_statfs nr 5134 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
++syscall sys_fstatfs nr 5135 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
++syscall sys_sched_rr_get_interval nr 5145 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
++syscall sys_sysctl nr 5152 nbargs 1 types: (struct __sysctl_args *) args: (args)
++syscall sys_adjtimex nr 5154 nbargs 1 types: (struct timex *) args: (txc_p)
++syscall sys_setrlimit nr 5155 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
++syscall sys_settimeofday nr 5159 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
++syscall sys_mount nr 5160 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
++syscall sys_futex nr 5194 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
++syscall sys_sched_setaffinity nr 5195 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
++syscall sys_sched_getaffinity nr 5196 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
++syscall sys_io_setup nr 5200 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
++syscall sys_io_getevents nr 5202 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
++syscall sys_io_submit nr 5203 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
++syscall sys_semtimedop nr 5214 nbargs 4 types: (int, struct sembuf *, unsigned, const struct timespec *) args: (semid, tsops, nsops, timeout)
++syscall sys_timer_create nr 5216 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
++syscall sys_timer_settime nr 5217 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
++syscall sys_timer_gettime nr 5218 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
++syscall sys_clock_settime nr 5221 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
++syscall sys_clock_gettime nr 5222 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
++syscall sys_clock_getres nr 5223 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
++syscall sys_clock_nanosleep nr 5224 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
++syscall sys_utimes nr 5226 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
++syscall sys_waitid nr 5237 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
++syscall sys_futimesat nr 5251 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
++syscall sys_pselect6 nr 5260 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
++syscall sys_ppoll nr 5261 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
++syscall sys_vmsplice nr 5266 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
++syscall sys_set_robust_list nr 5268 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
++syscall sys_get_robust_list nr 5269 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
++syscall sys_epoll_pwait nr 5272 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
++syscall sys_utimensat nr 5275 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
++syscall sys_signalfd nr 5276 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
++syscall sys_timerfd_gettime nr 5281 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
++syscall sys_timerfd_settime nr 5282 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
++syscall sys_rt_tgsigqueueinfo nr 5291 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
++syscall sys_recvmmsg nr 5294 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
++syscall sys_clock_adjtime nr 5300 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
++syscall sys_sendmmsg nr 5302 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
++syscall sys_process_vm_readv nr 5304 nbargs 6 types: (pid_t, const struct iovec *, unsigned long, const struct iovec *, unsigned long, unsigned long) args: (pid, lvec, liovcnt, rvec, riovcnt, flags)
++syscall sys_process_vm_writev nr 5305 nbargs 6 types: (pid_t, const struct iovec *, unsigned long, const struct iovec *, unsigned long, unsigned long) args: (pid, lvec, liovcnt, rvec, riovcnt, flags)
++syscall sys_read nr 6000 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
++syscall sys_write nr 6001 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
++syscall sys_open nr 6002 nbargs 3 types: (const char *, int, umode_t) args: (filename, flags, mode)
++syscall sys_close nr 6003 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_newstat nr 6004 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_newfstat nr 6005 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
++syscall sys_newlstat nr 6006 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
++syscall sys_poll nr 6007 nbargs 3 types: (struct pollfd *, unsigned int, int) args: (ufds, nfds, timeout_msecs)
++syscall sys_lseek nr 6008 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
++syscall sys_mips_mmap nr 6009 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, off_t) args: (addr, len, prot, flags, fd, offset)
++syscall sys_mprotect nr 6010 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
++syscall sys_munmap nr 6011 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
++syscall sys_brk nr 6012 nbargs 1 types: (unsigned long) args: (brk)
++syscall sys_32_rt_sigaction nr 6013 nbargs 4 types: (int, const struct sigaction32 *, struct sigaction32 *, unsigned int) args: (sig, act, oact, sigsetsize)
++syscall sys_32_rt_sigprocmask nr 6014 nbargs 4 types: (int, compat_sigset_t *, compat_sigset_t *, unsigned int) args: (how, set, oset, sigsetsize)
++syscall sys_access nr 6020 nbargs 2 types: (const char *, int) args: (filename, mode)
++syscall sys_sched_yield nr 6023 nbargs 0 types: () args: ()
++syscall sys_mremap nr 6024 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
++syscall sys_msync nr 6025 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
++syscall sys_mincore nr 6026 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
++syscall sys_madvise nr 6027 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
++syscall sys_shmget nr 6028 nbargs 3 types: (key_t, size_t, int) args: (key, size, shmflg)
++syscall sys_shmat nr 6029 nbargs 3 types: (int, char *, int) args: (shmid, shmaddr, shmflg)
++syscall sys_dup nr 6031 nbargs 1 types: (unsigned int) args: (fildes)
++syscall sys_dup2 nr 6032 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
++syscall sys_pause nr 6033 nbargs 0 types: () args: ()
++syscall sys_alarm nr 6037 nbargs 1 types: (unsigned int) args: (seconds)
++syscall sys_getpid nr 6038 nbargs 0 types: () args: ()
++syscall sys_32_sendfile nr 6039 nbargs 4 types: (long, long, compat_off_t *, s32) args: (out_fd, in_fd, offset, count)
++syscall sys_socket nr 6040 nbargs 3 types: (int, int, int) args: (family, type, protocol)
++syscall sys_connect nr 6041 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, uservaddr, addrlen)
++syscall sys_accept nr 6042 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, upeer_sockaddr, upeer_addrlen)
++syscall sys_sendto nr 6043 nbargs 6 types: (int, void *, size_t, unsigned int, struct sockaddr *, int) args: (fd, buff, len, flags, addr, addr_len)
++syscall sys_shutdown nr 6047 nbargs 2 types: (int, int) args: (fd, how)
++syscall sys_bind nr 6048 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, umyaddr, addrlen)
++syscall sys_listen nr 6049 nbargs 2 types: (int, int) args: (fd, backlog)
++syscall sys_getsockname nr 6050 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
++syscall sys_getpeername nr 6051 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
++syscall sys_socketpair nr 6052 nbargs 4 types: (int, int, int, int *) args: (family, type, protocol, usockvec)
++syscall sys_getsockopt nr 6054 nbargs 5 types: (int, int, int, char *, int *) args: (fd, level, optname, optval, optlen)
++syscall sys_exit nr 6058 nbargs 1 types: (int) args: (error_code)
++syscall sys_kill nr 6060 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_newuname nr 6061 nbargs 1 types: (struct new_utsname *) args: (name)
++syscall sys_semget nr 6062 nbargs 3 types: (key_t, int, int) args: (key, nsems, semflg)
++syscall sys_semop nr 6063 nbargs 3 types: (int, struct sembuf *, unsigned) args: (semid, tsops, nsops)
++syscall sys_n32_semctl nr 6064 nbargs 4 types: (int, int, int, u32) args: (semid, semnum, cmd, arg)
++syscall sys_shmdt nr 6065 nbargs 1 types: (char *) args: (shmaddr)
++syscall sys_msgget nr 6066 nbargs 2 types: (key_t, int) args: (key, msgflg)
++syscall sys_n32_msgsnd nr 6067 nbargs 4 types: (int, u32, unsigned int, int) args: (msqid, msgp, msgsz, msgflg)
++syscall sys_n32_msgrcv nr 6068 nbargs 5 types: (int, u32, size_t, int, int) args: (msqid, msgp, msgsz, msgtyp, msgflg)
++syscall sys_flock nr 6071 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
++syscall sys_fsync nr 6072 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_fdatasync nr 6073 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_truncate nr 6074 nbargs 2 types: (const char *, long) args: (path, length)
++syscall sys_ftruncate nr 6075 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
++syscall sys_getcwd nr 6077 nbargs 2 types: (char *, unsigned long) args: (buf, size)
++syscall sys_chdir nr 6078 nbargs 1 types: (const char *) args: (filename)
++syscall sys_fchdir nr 6079 nbargs 1 types: (unsigned int) args: (fd)
++syscall sys_rename nr 6080 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_mkdir nr 6081 nbargs 2 types: (const char *, umode_t) args: (pathname, mode)
++syscall sys_rmdir nr 6082 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_creat nr 6083 nbargs 2 types: (const char *, umode_t) args: (pathname, mode)
++syscall sys_link nr 6084 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_unlink nr 6085 nbargs 1 types: (const char *) args: (pathname)
++syscall sys_symlink nr 6086 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
++syscall sys_readlink nr 6087 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
++syscall sys_chmod nr 6088 nbargs 2 types: (const char *, umode_t) args: (filename, mode)
++syscall sys_fchmod nr 6089 nbargs 2 types: (unsigned int, umode_t) args: (fd, mode)
++syscall sys_chown nr 6090 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_fchown nr 6091 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
++syscall sys_lchown nr 6092 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
++syscall sys_umask nr 6093 nbargs 1 types: (int) args: (mask)
++syscall sys_getuid nr 6100 nbargs 0 types: () args: ()
++syscall sys_syslog nr 6101 nbargs 3 types: (int, char *, int) args: (type, buf, len)
++syscall sys_getgid nr 6102 nbargs 0 types: () args: ()
++syscall sys_setuid nr 6103 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_setgid nr 6104 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_geteuid nr 6105 nbargs 0 types: () args: ()
++syscall sys_getegid nr 6106 nbargs 0 types: () args: ()
++syscall sys_setpgid nr 6107 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
++syscall sys_getppid nr 6108 nbargs 0 types: () args: ()
++syscall sys_getpgrp nr 6109 nbargs 0 types: () args: ()
++syscall sys_setsid nr 6110 nbargs 0 types: () args: ()
++syscall sys_setreuid nr 6111 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
++syscall sys_setregid nr 6112 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
++syscall sys_getgroups nr 6113 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_setgroups nr 6114 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
++syscall sys_setresuid nr 6115 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
++syscall sys_getresuid nr 6116 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruidp, euidp, suidp)
++syscall sys_setresgid nr 6117 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
++syscall sys_getresgid nr 6118 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgidp, egidp, sgidp)
++syscall sys_getpgid nr 6119 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_setfsuid nr 6120 nbargs 1 types: (uid_t) args: (uid)
++syscall sys_setfsgid nr 6121 nbargs 1 types: (gid_t) args: (gid)
++syscall sys_getsid nr 6122 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_capget nr 6123 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
++syscall sys_capset nr 6124 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
++syscall sys_32_rt_sigpending nr 6125 nbargs 2 types: (compat_sigset_t *, unsigned int) args: (uset, sigsetsize)
++syscall sys_32_rt_sigqueueinfo nr 6127 nbargs 3 types: (int, int, compat_siginfo_t *) args: (pid, sig, uinfo)
++syscall sys_mknod nr 6131 nbargs 3 types: (const char *, umode_t, unsigned) args: (filename, mode, dev)
++syscall sys_32_personality nr 6132 nbargs 1 types: (unsigned long) args: (personality)
++syscall sys_sysfs nr 6136 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
++syscall sys_getpriority nr 6137 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_setpriority nr 6138 nbargs 3 types: (int, int, int) args: (which, who, niceval)
++syscall sys_sched_setparam nr 6139 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_getparam nr 6140 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
++syscall sys_sched_setscheduler nr 6141 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
++syscall sys_sched_getscheduler nr 6142 nbargs 1 types: (pid_t) args: (pid)
++syscall sys_sched_get_priority_max nr 6143 nbargs 1 types: (int) args: (policy)
++syscall sys_sched_get_priority_min nr 6144 nbargs 1 types: (int) args: (policy)
++syscall sys_32_sched_rr_get_interval nr 6145 nbargs 2 types: (compat_pid_t, struct compat_timespec *) args: (pid, interval)
++syscall sys_mlock nr 6146 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_munlock nr 6147 nbargs 2 types: (unsigned long, size_t) args: (start, len)
++syscall sys_mlockall nr 6148 nbargs 1 types: (int) args: (flags)
++syscall sys_munlockall nr 6149 nbargs 0 types: () args: ()
++syscall sys_vhangup nr 6150 nbargs 0 types: () args: ()
++syscall sys_pivot_root nr 6151 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
++syscall sys_prctl nr 6153 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
++syscall sys_chroot nr 6156 nbargs 1 types: (const char *) args: (filename)
++syscall sys_sync nr 6157 nbargs 0 types: () args: ()
++syscall sys_umount nr 6161 nbargs 2 types: (char *, int) args: (name, flags)
++syscall sys_swapon nr 6162 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
++syscall sys_swapoff nr 6163 nbargs 1 types: (const char *) args: (specialfile)
++syscall sys_reboot nr 6164 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
++syscall sys_sethostname nr 6165 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_setdomainname nr 6166 nbargs 2 types: (char *, int) args: (name, len)
++syscall sys_init_module nr 6168 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
++syscall sys_delete_module nr 6169 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
++syscall sys_quotactl nr 6172 nbargs 4 types: (unsigned int, const char *, qid_t, void *) args: (cmd, special, id, addr)
++syscall sys_gettid nr 6178 nbargs 0 types: () args: ()
++syscall sys_setxattr nr 6180 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
++syscall sys_lsetxattr nr 6181 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
++syscall sys_fsetxattr nr 6182 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
++syscall sys_getxattr nr 6183 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
++syscall sys_lgetxattr nr 6184 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
++syscall sys_fgetxattr nr 6185 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
++syscall sys_listxattr nr 6186 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
++syscall sys_llistxattr nr 6187 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
++syscall sys_flistxattr nr 6188 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
++syscall sys_removexattr nr 6189 nbargs 2 types: (const char *, const char *) args: (pathname, name)
++syscall sys_lremovexattr nr 6190 nbargs 2 types: (const char *, const char *) args: (pathname, name)
++syscall sys_fremovexattr nr 6191 nbargs 2 types: (int, const char *) args: (fd, name)
++syscall sys_tkill nr 6192 nbargs 2 types: (pid_t, int) args: (pid, sig)
++syscall sys_32_futex nr 6194 nbargs 6 types: (u32 *, int, u32, struct compat_timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
++syscall sys_cacheflush nr 6197 nbargs 3 types: (unsigned long, unsigned long, unsigned int) args: (addr, bytes, cache)
++syscall sys_cachectl nr 6198 nbargs 3 types: (char *, int, int) args: (addr, nbytes, op)
++syscall sys_io_destroy nr 6201 nbargs 1 types: (aio_context_t) args: (ctx)
++syscall sys_io_cancel nr 6204 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
++syscall sys_exit_group nr 6205 nbargs 1 types: (int) args: (error_code)
++syscall sys_epoll_create nr 6207 nbargs 1 types: (int) args: (size)
++syscall sys_epoll_ctl nr 6208 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
++syscall sys_epoll_wait nr 6209 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
++syscall sys_remap_file_pages nr 6210 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
++syscall sys_set_tid_address nr 6213 nbargs 1 types: (int *) args: (tidptr)
++syscall sys_restart_syscall nr 6214 nbargs 0 types: () args: ()
++syscall sys_sendfile64 nr 6219 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
++syscall sys_timer_getoverrun nr 6223 nbargs 1 types: (timer_t) args: (timer_id)
++syscall sys_timer_delete nr 6224 nbargs 1 types: (timer_t) args: (timer_id)
++syscall sys_tgkill nr 6229 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
++syscall sys_set_thread_area nr 6246 nbargs 1 types: (unsigned long) args: (addr)
++syscall sys_inotify_init nr 6247 nbargs 0 types: () args: ()
++syscall sys_inotify_add_watch nr 6248 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
++syscall sys_inotify_rm_watch nr 6249 nbargs 2 types: (int, __s32) args: (fd, wd)
++syscall sys_openat nr 6251 nbargs 4 types: (int, const char *, int, umode_t) args: (dfd, filename, flags, mode)
++syscall sys_mkdirat nr 6252 nbargs 3 types: (int, const char *, umode_t) args: (dfd, pathname, mode)
++syscall sys_mknodat nr 6253 nbargs 4 types: (int, const char *, umode_t, unsigned) args: (dfd, filename, mode, dev)
++syscall sys_fchownat nr 6254 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
++syscall sys_newfstatat nr 6256 nbargs 4 types: (int, const char *, struct stat *, int) args: (dfd, filename, statbuf, flag)
++syscall sys_unlinkat nr 6257 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
++syscall sys_renameat nr 6258 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
++syscall sys_linkat nr 6259 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
++syscall sys_symlinkat nr 6260 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
++syscall sys_readlinkat nr 6261 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
++syscall sys_fchmodat nr 6262 nbargs 3 types: (int, const char *, umode_t) args: (dfd, filename, mode)
++syscall sys_faccessat nr 6263 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
++syscall sys_unshare nr 6266 nbargs 1 types: (unsigned long) args: (unshare_flags)
++syscall sys_splice nr 6267 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
++syscall sys_tee nr 6269 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
++syscall sys_getcpu nr 6275 nbargs 3 types: (unsigned *, unsigned *, struct getcpu_cache *) args: (cpup, nodep, unused)
++syscall sys_ioprio_set nr 6277 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
++syscall sys_ioprio_get nr 6278 nbargs 2 types: (int, int) args: (which, who)
++syscall sys_eventfd nr 6282 nbargs 1 types: (unsigned int) args: (count)
++syscall sys_timerfd_create nr 6284 nbargs 2 types: (int, int) args: (clockid, flags)
++syscall sys_signalfd4 nr 6287 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
++syscall sys_eventfd2 nr 6288 nbargs 2 types: (unsigned int, int) args: (count, flags)
++syscall sys_epoll_create1 nr 6289 nbargs 1 types: (int) args: (flags)
++syscall sys_dup3 nr 6290 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
++syscall sys_pipe2 nr 6291 nbargs 2 types: (int *, int) args: (fildes, flags)
++syscall sys_inotify_init1 nr 6292 nbargs 1 types: (int) args: (flags)
++syscall sys_preadv nr 6293 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
++syscall sys_pwritev nr 6294 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
++syscall sys_accept4 nr 6297 nbargs 4 types: (int, struct sockaddr *, int *, int) args: (fd, upeer_sockaddr, upeer_addrlen, flags)
++syscall sys_getdents64 nr 6299 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
++syscall sys_prlimit64 nr 6302 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
++syscall sys_syncfs nr 6306 nbargs 1 types: (int) args: (fd)
++syscall sys_setns nr 6308 nbargs 2 types: (int, int) args: (fd, nstype)
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/README
+@@ -0,0 +1,18 @@
++LTTng system call tracing
++
++1) lttng-syscall-extractor
++
++You need to build a kernel with CONFIG_FTRACE_SYSCALLS=y and
++CONFIG_KALLSYMS_ALL=y for extraction. Apply the linker patch to get your
++kernel to keep the system call metadata after boot. Then build and load
++the LTTng syscall extractor module. The module will fail to load (this
++is expected). See the dmesg output for system call metadata.
++
++2) Generate system call TRACE_EVENT().
++
++Take the dmesg metadata and feed it to lttng-syscalls-generate-headers.sh,
++run from the instrumentation/syscalls directory. See the script header for a
++usage example. It should be run for both the integer and pointer types.
++
++Once these headers are generated, we only need to track newly added system
++calls; there is no need to regenerate everything, since system calls are only appended.
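As a reading aid for step 2 (not part of the patch itself), here is a minimal, hypothetical C sketch showing how one line of the dmesg metadata listed in the tables above can be parsed; the actual conversion to TRACE_EVENT() definitions is performed by lttng-syscalls-generate-headers.sh. The parsed name, types and args fields correspond directly to the event name, TP_PROTO() and TP_ARGS() of the SC_TRACE_EVENT() entries in the generated headers below.

	/*
	 * Minimal illustration only -- not part of the LTTng patch.  Parses one
	 * line of the extractor's dmesg metadata, e.g.:
	 *   syscall sys_close nr 6003 nbargs 1 types: (unsigned int) args: (fd)
	 * Zero-argument entries ("types: () args: ()") would need extra handling.
	 */
	#include <stdio.h>

	int main(void)
	{
		const char *line =
			"syscall sys_close nr 6003 nbargs 1 types: (unsigned int) args: (fd)";
		char name[64], types[128], args[128];
		int nr, nbargs;

		if (sscanf(line,
			   "syscall %63s nr %d nbargs %d types: (%127[^)]) args: (%127[^)])",
			   name, &nr, &nbargs, types, args) == 5)
			printf("%s: nr=%d nbargs=%d types=\"%s\" args=\"%s\"\n",
			       name, nr, nbargs, types, args);
		return 0;
	}

For the sys_close line above, this prints the same fields that appear in the generated header further down (TP_PROTO(unsigned int fd), TP_ARGS(fd)).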
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/arm-32-syscalls-3.4.25_integers.h
+@@ -0,0 +1,1181 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_INTEGERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_INTEGERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "arm-32-syscalls-3.4.25_integers_override.h"
++#include "syscalls_integers_override.h"
++
++SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,
++ TP_STRUCT__entry(),
++ TP_fast_assign(),
++ TP_printk()
++)
++#ifndef OVERRIDE_32_sys_restart_syscall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_restart_syscall)
++#endif
++#ifndef OVERRIDE_32_sys_getpid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpid)
++#endif
++#ifndef OVERRIDE_32_sys_getuid16
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid16)
++#endif
++#ifndef OVERRIDE_32_sys_pause
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_pause)
++#endif
++#ifndef OVERRIDE_32_sys_sync
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sync)
++#endif
++#ifndef OVERRIDE_32_sys_getgid16
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid16)
++#endif
++#ifndef OVERRIDE_32_sys_geteuid16
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid16)
++#endif
++#ifndef OVERRIDE_32_sys_getegid16
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid16)
++#endif
++#ifndef OVERRIDE_32_sys_getppid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getppid)
++#endif
++#ifndef OVERRIDE_32_sys_getpgrp
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpgrp)
++#endif
++#ifndef OVERRIDE_32_sys_setsid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_setsid)
++#endif
++#ifndef OVERRIDE_32_sys_vhangup
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_vhangup)
++#endif
++#ifndef OVERRIDE_32_sys_munlockall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_munlockall)
++#endif
++#ifndef OVERRIDE_32_sys_sched_yield
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sched_yield)
++#endif
++#ifndef OVERRIDE_32_sys_getuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid)
++#endif
++#ifndef OVERRIDE_32_sys_getgid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid)
++#endif
++#ifndef OVERRIDE_32_sys_geteuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid)
++#endif
++#ifndef OVERRIDE_32_sys_getegid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid)
++#endif
++#ifndef OVERRIDE_32_sys_gettid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_gettid)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_init
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_inotify_init)
++#endif
++#ifndef OVERRIDE_32_sys_exit
++SC_TRACE_EVENT(sys_exit,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_close
++SC_TRACE_EVENT(sys_close,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setuid16
++SC_TRACE_EVENT(sys_setuid16,
++ TP_PROTO(old_uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(old_uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_nice
++SC_TRACE_EVENT(sys_nice,
++ TP_PROTO(int increment),
++ TP_ARGS(increment),
++ TP_STRUCT__entry(__field(int, increment)),
++ TP_fast_assign(tp_assign(increment, increment)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup
++SC_TRACE_EVENT(sys_dup,
++ TP_PROTO(unsigned int fildes),
++ TP_ARGS(fildes),
++ TP_STRUCT__entry(__field(unsigned int, fildes)),
++ TP_fast_assign(tp_assign(fildes, fildes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_brk
++SC_TRACE_EVENT(sys_brk,
++ TP_PROTO(unsigned long brk),
++ TP_ARGS(brk),
++ TP_STRUCT__entry(__field(unsigned long, brk)),
++ TP_fast_assign(tp_assign(brk, brk)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgid16
++SC_TRACE_EVENT(sys_setgid16,
++ TP_PROTO(old_gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(old_gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_umask
++SC_TRACE_EVENT(sys_umask,
++ TP_PROTO(int mask),
++ TP_ARGS(mask),
++ TP_STRUCT__entry(__field(int, mask)),
++ TP_fast_assign(tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fsync
++SC_TRACE_EVENT(sys_fsync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpgid
++SC_TRACE_EVENT(sys_getpgid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchdir
++SC_TRACE_EVENT(sys_fchdir,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_personality
++SC_TRACE_EVENT(sys_personality,
++ TP_PROTO(unsigned int personality),
++ TP_ARGS(personality),
++ TP_STRUCT__entry(__field(unsigned int, personality)),
++ TP_fast_assign(tp_assign(personality, personality)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsuid16
++SC_TRACE_EVENT(sys_setfsuid16,
++ TP_PROTO(old_uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(old_uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsgid16
++SC_TRACE_EVENT(sys_setfsgid16,
++ TP_PROTO(old_gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(old_gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getsid
++SC_TRACE_EVENT(sys_getsid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fdatasync
++SC_TRACE_EVENT(sys_fdatasync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mlockall
++SC_TRACE_EVENT(sys_mlockall,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getscheduler
++SC_TRACE_EVENT(sys_sched_getscheduler,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_get_priority_max
++SC_TRACE_EVENT(sys_sched_get_priority_max,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_get_priority_min
++SC_TRACE_EVENT(sys_sched_get_priority_min,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setuid
++SC_TRACE_EVENT(sys_setuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgid
++SC_TRACE_EVENT(sys_setgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsuid
++SC_TRACE_EVENT(sys_setfsuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsgid
++SC_TRACE_EVENT(sys_setfsgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_destroy
++SC_TRACE_EVENT(sys_io_destroy,
++ TP_PROTO(aio_context_t ctx),
++ TP_ARGS(ctx),
++ TP_STRUCT__entry(__field(aio_context_t, ctx)),
++ TP_fast_assign(tp_assign(ctx, ctx)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_exit_group
++SC_TRACE_EVENT(sys_exit_group,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_create
++SC_TRACE_EVENT(sys_epoll_create,
++ TP_PROTO(int size),
++ TP_ARGS(size),
++ TP_STRUCT__entry(__field(int, size)),
++ TP_fast_assign(tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_getoverrun
++SC_TRACE_EVENT(sys_timer_getoverrun,
++ TP_PROTO(timer_t timer_id),
++ TP_ARGS(timer_id),
++ TP_STRUCT__entry(__field(timer_t, timer_id)),
++ TP_fast_assign(tp_assign(timer_id, timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_delete
++SC_TRACE_EVENT(sys_timer_delete,
++ TP_PROTO(timer_t timer_id),
++ TP_ARGS(timer_id),
++ TP_STRUCT__entry(__field(timer_t, timer_id)),
++ TP_fast_assign(tp_assign(timer_id, timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_unshare
++SC_TRACE_EVENT(sys_unshare,
++ TP_PROTO(unsigned long unshare_flags),
++ TP_ARGS(unshare_flags),
++ TP_STRUCT__entry(__field(unsigned long, unshare_flags)),
++ TP_fast_assign(tp_assign(unshare_flags, unshare_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_eventfd
++SC_TRACE_EVENT(sys_eventfd,
++ TP_PROTO(unsigned int count),
++ TP_ARGS(count),
++ TP_STRUCT__entry(__field(unsigned int, count)),
++ TP_fast_assign(tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_create1
++SC_TRACE_EVENT(sys_epoll_create1,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_init1
++SC_TRACE_EVENT(sys_inotify_init1,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_syncfs
++SC_TRACE_EVENT(sys_syncfs,
++ TP_PROTO(int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_kill
++SC_TRACE_EVENT(sys_kill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setpgid
++SC_TRACE_EVENT(sys_setpgid,
++ TP_PROTO(pid_t pid, pid_t pgid),
++ TP_ARGS(pid, pgid),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, pgid)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(pgid, pgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup2
++SC_TRACE_EVENT(sys_dup2,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd),
++ TP_ARGS(oldfd, newfd),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setreuid16
++SC_TRACE_EVENT(sys_setreuid16,
++ TP_PROTO(old_uid_t ruid, old_uid_t euid),
++ TP_ARGS(ruid, euid),
++ TP_STRUCT__entry(__field(old_uid_t, ruid) __field(old_uid_t, euid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setregid16
++SC_TRACE_EVENT(sys_setregid16,
++ TP_PROTO(old_gid_t rgid, old_gid_t egid),
++ TP_ARGS(rgid, egid),
++ TP_STRUCT__entry(__field(old_gid_t, rgid) __field(old_gid_t, egid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_munmap
++SC_TRACE_EVENT(sys_munmap,
++ TP_PROTO(unsigned long addr, size_t len),
++ TP_ARGS(addr, len),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ftruncate
++SC_TRACE_EVENT(sys_ftruncate,
++ TP_PROTO(unsigned int fd, unsigned long length),
++ TP_ARGS(fd, length),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, length)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchmod
++SC_TRACE_EVENT(sys_fchmod,
++ TP_PROTO(unsigned int fd, umode_t mode),
++ TP_ARGS(fd, mode),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpriority
++SC_TRACE_EVENT(sys_getpriority,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_bdflush
++SC_TRACE_EVENT(sys_bdflush,
++ TP_PROTO(int func, long data),
++ TP_ARGS(func, data),
++ TP_STRUCT__entry(__field(int, func) __field(long, data)),
++ TP_fast_assign(tp_assign(func, func) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_flock
++SC_TRACE_EVENT(sys_flock,
++ TP_PROTO(unsigned int fd, unsigned int cmd),
++ TP_ARGS(fd, cmd),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mlock
++SC_TRACE_EVENT(sys_mlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_munlock
++SC_TRACE_EVENT(sys_munlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setreuid
++SC_TRACE_EVENT(sys_setreuid,
++ TP_PROTO(uid_t ruid, uid_t euid),
++ TP_ARGS(ruid, euid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setregid
++SC_TRACE_EVENT(sys_setregid,
++ TP_PROTO(gid_t rgid, gid_t egid),
++ TP_ARGS(rgid, egid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_tkill
++SC_TRACE_EVENT(sys_tkill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_listen
++SC_TRACE_EVENT(sys_listen,
++ TP_PROTO(int fd, int backlog),
++ TP_ARGS(fd, backlog),
++ TP_STRUCT__entry(__field(int, fd) __field(int, backlog)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(backlog, backlog)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_shutdown
++SC_TRACE_EVENT(sys_shutdown,
++ TP_PROTO(int fd, int how),
++ TP_ARGS(fd, how),
++ TP_STRUCT__entry(__field(int, fd) __field(int, how)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(how, how)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_msgget
++SC_TRACE_EVENT(sys_msgget,
++ TP_PROTO(key_t key, int msgflg),
++ TP_ARGS(key, msgflg),
++ TP_STRUCT__entry(__field(key_t, key) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(key, key) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ioprio_get
++SC_TRACE_EVENT(sys_ioprio_get,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_rm_watch
++SC_TRACE_EVENT(sys_inotify_rm_watch,
++ TP_PROTO(int fd, __s32 wd),
++ TP_ARGS(fd, wd),
++ TP_STRUCT__entry(__field(int, fd) __field(__s32, wd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(wd, wd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timerfd_create
++SC_TRACE_EVENT(sys_timerfd_create,
++ TP_PROTO(int clockid, int flags),
++ TP_ARGS(clockid, flags),
++ TP_STRUCT__entry(__field(int, clockid) __field(int, flags)),
++ TP_fast_assign(tp_assign(clockid, clockid) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_eventfd2
++SC_TRACE_EVENT(sys_eventfd2,
++ TP_PROTO(unsigned int count, int flags),
++ TP_ARGS(count, flags),
++ TP_STRUCT__entry(__field(unsigned int, count) __field(int, flags)),
++ TP_fast_assign(tp_assign(count, count) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fanotify_init
++SC_TRACE_EVENT(sys_fanotify_init,
++ TP_PROTO(unsigned int flags, unsigned int event_f_flags),
++ TP_ARGS(flags, event_f_flags),
++ TP_STRUCT__entry(__field(unsigned int, flags) __field(unsigned int, event_f_flags)),
++ TP_fast_assign(tp_assign(flags, flags) tp_assign(event_f_flags, event_f_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setns
++SC_TRACE_EVENT(sys_setns,
++ TP_PROTO(int fd, int nstype),
++ TP_ARGS(fd, nstype),
++ TP_STRUCT__entry(__field(int, fd) __field(int, nstype)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(nstype, nstype)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lseek
++SC_TRACE_EVENT(sys_lseek,
++ TP_PROTO(unsigned int fd, off_t offset, unsigned int origin),
++ TP_ARGS(fd, offset, origin),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(off_t, offset) __field(unsigned int, origin)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(origin, origin)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ioctl
++SC_TRACE_EVENT(sys_ioctl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fcntl
++SC_TRACE_EVENT(sys_fcntl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchown16
++SC_TRACE_EVENT(sys_fchown16,
++ TP_PROTO(unsigned int fd, old_uid_t user, old_gid_t group),
++ TP_ARGS(fd, user, group),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(old_uid_t, user) __field(old_gid_t, group)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setpriority
++SC_TRACE_EVENT(sys_setpriority,
++ TP_PROTO(int which, int who, int niceval),
++ TP_ARGS(which, who, niceval),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, niceval)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(niceval, niceval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mprotect
++SC_TRACE_EVENT(sys_mprotect,
++ TP_PROTO(unsigned long start, size_t len, unsigned long prot),
++ TP_ARGS(start, len, prot),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(unsigned long, prot)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(prot, prot)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysfs
++SC_TRACE_EVENT(sys_sysfs,
++ TP_PROTO(int option, unsigned long arg1, unsigned long arg2),
++ TP_ARGS(option, arg1, arg2),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg1) __field(unsigned long, arg2)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg1, arg1) tp_assign(arg2, arg2)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_msync
++SC_TRACE_EVENT(sys_msync,
++ TP_PROTO(unsigned long start, size_t len, int flags),
++ TP_ARGS(start, len, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(int, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setresuid16
++SC_TRACE_EVENT(sys_setresuid16,
++ TP_PROTO(old_uid_t ruid, old_uid_t euid, old_uid_t suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field(old_uid_t, ruid) __field(old_uid_t, euid) __field(old_uid_t, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setresgid16
++SC_TRACE_EVENT(sys_setresgid16,
++ TP_PROTO(old_gid_t rgid, old_gid_t egid, old_gid_t sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field(old_gid_t, rgid) __field(old_gid_t, egid) __field(old_gid_t, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchown
++SC_TRACE_EVENT(sys_fchown,
++ TP_PROTO(unsigned int fd, uid_t user, gid_t group),
++ TP_ARGS(fd, user, group),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setresuid
++SC_TRACE_EVENT(sys_setresuid,
++ TP_PROTO(uid_t ruid, uid_t euid, uid_t suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid) __field(uid_t, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setresgid
++SC_TRACE_EVENT(sys_setresgid,
++ TP_PROTO(gid_t rgid, gid_t egid, gid_t sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid) __field(gid_t, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_madvise
++SC_TRACE_EVENT(sys_madvise,
++ TP_PROTO(unsigned long start, size_t len_in, int behavior),
++ TP_ARGS(start, len_in, behavior),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len_in) __field(int, behavior)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len_in, len_in) tp_assign(behavior, behavior)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fcntl64
++SC_TRACE_EVENT(sys_fcntl64,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_tgkill
++SC_TRACE_EVENT(sys_tgkill,
++ TP_PROTO(pid_t tgid, pid_t pid, int sig),
++ TP_ARGS(tgid, pid, sig),
++ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_socket
++SC_TRACE_EVENT(sys_socket,
++ TP_PROTO(int family, int type, int protocol),
++ TP_ARGS(family, type, protocol),
++ TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol)),
++ TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_semget
++SC_TRACE_EVENT(sys_semget,
++ TP_PROTO(key_t key, int nsems, int semflg),
++ TP_ARGS(key, nsems, semflg),
++ TP_STRUCT__entry(__field(key_t, key) __field(int, nsems) __field(int, semflg)),
++ TP_fast_assign(tp_assign(key, key) tp_assign(nsems, nsems) tp_assign(semflg, semflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_shmget
++SC_TRACE_EVENT(sys_shmget,
++ TP_PROTO(key_t key, size_t size, int shmflg),
++ TP_ARGS(key, size, shmflg),
++ TP_STRUCT__entry(__field(key_t, key) __field(size_t, size) __field(int, shmflg)),
++ TP_fast_assign(tp_assign(key, key) tp_assign(size, size) tp_assign(shmflg, shmflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ioprio_set
++SC_TRACE_EVENT(sys_ioprio_set,
++ TP_PROTO(int which, int who, int ioprio),
++ TP_ARGS(which, who, ioprio),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, ioprio)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(ioprio, ioprio)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup3
++SC_TRACE_EVENT(sys_dup3,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd, int flags),
++ TP_ARGS(oldfd, newfd, flags),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd) __field(int, flags)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ptrace
++SC_TRACE_EVENT(sys_ptrace,
++ TP_PROTO(long request, long pid, unsigned long addr, unsigned long data),
++ TP_ARGS(request, pid, addr, data),
++ TP_STRUCT__entry(__field(long, request) __field(long, pid) __field_hex(unsigned long, addr) __field(unsigned long, data)),
++ TP_fast_assign(tp_assign(request, request) tp_assign(pid, pid) tp_assign(addr, addr) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_tee
++SC_TRACE_EVENT(sys_tee,
++ TP_PROTO(int fdin, int fdout, size_t len, unsigned int flags),
++ TP_ARGS(fdin, fdout, len, flags),
++ TP_STRUCT__entry(__field(int, fdin) __field(int, fdout) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fdin, fdin) tp_assign(fdout, fdout) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mremap
++SC_TRACE_EVENT(sys_mremap,
++ TP_PROTO(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr),
++ TP_ARGS(addr, old_len, new_len, flags, new_addr),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, old_len) __field(unsigned long, new_len) __field(unsigned long, flags) __field_hex(unsigned long, new_addr)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(old_len, old_len) tp_assign(new_len, new_len) tp_assign(flags, flags) tp_assign(new_addr, new_addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_prctl
++SC_TRACE_EVENT(sys_prctl,
++ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
++ TP_ARGS(option, arg2, arg3, arg4, arg5),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_remap_file_pages
++SC_TRACE_EVENT(sys_remap_file_pages,
++ TP_PROTO(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags),
++ TP_ARGS(start, size, prot, pgoff, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(unsigned long, size) __field(unsigned long, prot) __field(unsigned long, pgoff) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(size, size) tp_assign(prot, prot) tp_assign(pgoff, pgoff) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_keyctl
++SC_TRACE_EVENT(sys_keyctl,
++ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
++ TP_ARGS(option, arg2, arg3, arg4, arg5),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_INTEGERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "arm-32-syscalls-3.4.25_integers_override.h"
++#include "syscalls_integers_override.h"
++
++#ifndef OVERRIDE_TABLE_32_sys_restart_syscall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_restart_syscall, 0, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpid, 20, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getuid16
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid16, 24, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pause
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_pause, 29, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sync
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sync, 36, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgid16
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid16, 47, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_geteuid16
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid16, 49, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getegid16
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid16, 50, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getppid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getppid, 64, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpgrp
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpgrp, 65, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setsid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_setsid, 66, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_vhangup
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_vhangup, 111, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munlockall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_munlockall, 153, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_yield
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sched_yield, 158, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid, 199, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid, 200, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_geteuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid, 201, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getegid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid, 202, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_gettid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_gettid, 224, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_init
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_inotify_init, 316, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_exit
++TRACE_SYSCALL_TABLE(sys_exit, sys_exit, 1, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_close
++TRACE_SYSCALL_TABLE(sys_close, sys_close, 6, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lseek
++TRACE_SYSCALL_TABLE(sys_lseek, sys_lseek, 19, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setuid16
++TRACE_SYSCALL_TABLE(sys_setuid16, sys_setuid16, 23, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ptrace
++TRACE_SYSCALL_TABLE(sys_ptrace, sys_ptrace, 26, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_nice
++TRACE_SYSCALL_TABLE(sys_nice, sys_nice, 34, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_kill
++TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 37, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup
++TRACE_SYSCALL_TABLE(sys_dup, sys_dup, 41, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_brk
++TRACE_SYSCALL_TABLE(sys_brk, sys_brk, 45, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgid16
++TRACE_SYSCALL_TABLE(sys_setgid16, sys_setgid16, 46, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ioctl
++TRACE_SYSCALL_TABLE(sys_ioctl, sys_ioctl, 54, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fcntl
++TRACE_SYSCALL_TABLE(sys_fcntl, sys_fcntl, 55, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setpgid
++TRACE_SYSCALL_TABLE(sys_setpgid, sys_setpgid, 57, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_umask
++TRACE_SYSCALL_TABLE(sys_umask, sys_umask, 60, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup2
++TRACE_SYSCALL_TABLE(sys_dup2, sys_dup2, 63, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setreuid16
++TRACE_SYSCALL_TABLE(sys_setreuid16, sys_setreuid16, 70, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setregid16
++TRACE_SYSCALL_TABLE(sys_setregid16, sys_setregid16, 71, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munmap
++TRACE_SYSCALL_TABLE(sys_munmap, sys_munmap, 91, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ftruncate
++TRACE_SYSCALL_TABLE(sys_ftruncate, sys_ftruncate, 93, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchmod
++TRACE_SYSCALL_TABLE(sys_fchmod, sys_fchmod, 94, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchown16
++TRACE_SYSCALL_TABLE(sys_fchown16, sys_fchown16, 95, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpriority
++TRACE_SYSCALL_TABLE(sys_getpriority, sys_getpriority, 96, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setpriority
++TRACE_SYSCALL_TABLE(sys_setpriority, sys_setpriority, 97, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fsync
++TRACE_SYSCALL_TABLE(sys_fsync, sys_fsync, 118, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mprotect
++TRACE_SYSCALL_TABLE(sys_mprotect, sys_mprotect, 125, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpgid
++TRACE_SYSCALL_TABLE(sys_getpgid, sys_getpgid, 132, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchdir
++TRACE_SYSCALL_TABLE(sys_fchdir, sys_fchdir, 133, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_bdflush
++TRACE_SYSCALL_TABLE(sys_bdflush, sys_bdflush, 134, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysfs
++TRACE_SYSCALL_TABLE(sys_sysfs, sys_sysfs, 135, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_personality
++TRACE_SYSCALL_TABLE(sys_personality, sys_personality, 136, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsuid16
++TRACE_SYSCALL_TABLE(sys_setfsuid16, sys_setfsuid16, 138, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsgid16
++TRACE_SYSCALL_TABLE(sys_setfsgid16, sys_setfsgid16, 139, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_flock
++TRACE_SYSCALL_TABLE(sys_flock, sys_flock, 143, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_msync
++TRACE_SYSCALL_TABLE(sys_msync, sys_msync, 144, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getsid
++TRACE_SYSCALL_TABLE(sys_getsid, sys_getsid, 147, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fdatasync
++TRACE_SYSCALL_TABLE(sys_fdatasync, sys_fdatasync, 148, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mlock
++TRACE_SYSCALL_TABLE(sys_mlock, sys_mlock, 150, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munlock
++TRACE_SYSCALL_TABLE(sys_munlock, sys_munlock, 151, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mlockall
++TRACE_SYSCALL_TABLE(sys_mlockall, sys_mlockall, 152, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getscheduler
++TRACE_SYSCALL_TABLE(sys_sched_getscheduler, sys_sched_getscheduler, 157, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_max
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_max, sys_sched_get_priority_max, 159, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_min
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_min, sys_sched_get_priority_min, 160, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mremap
++TRACE_SYSCALL_TABLE(sys_mremap, sys_mremap, 163, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setresuid16
++TRACE_SYSCALL_TABLE(sys_setresuid16, sys_setresuid16, 164, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setresgid16
++TRACE_SYSCALL_TABLE(sys_setresgid16, sys_setresgid16, 170, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_prctl
++TRACE_SYSCALL_TABLE(sys_prctl, sys_prctl, 172, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setreuid
++TRACE_SYSCALL_TABLE(sys_setreuid, sys_setreuid, 203, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setregid
++TRACE_SYSCALL_TABLE(sys_setregid, sys_setregid, 204, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchown
++TRACE_SYSCALL_TABLE(sys_fchown, sys_fchown, 207, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setresuid
++TRACE_SYSCALL_TABLE(sys_setresuid, sys_setresuid, 208, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setresgid
++TRACE_SYSCALL_TABLE(sys_setresgid, sys_setresgid, 210, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setuid
++TRACE_SYSCALL_TABLE(sys_setuid, sys_setuid, 213, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgid
++TRACE_SYSCALL_TABLE(sys_setgid, sys_setgid, 214, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsuid
++TRACE_SYSCALL_TABLE(sys_setfsuid, sys_setfsuid, 215, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsgid
++TRACE_SYSCALL_TABLE(sys_setfsgid, sys_setfsgid, 216, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_madvise
++TRACE_SYSCALL_TABLE(sys_madvise, sys_madvise, 220, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fcntl64
++TRACE_SYSCALL_TABLE(sys_fcntl64, sys_fcntl64, 221, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_tkill
++TRACE_SYSCALL_TABLE(sys_tkill, sys_tkill, 238, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_destroy
++TRACE_SYSCALL_TABLE(sys_io_destroy, sys_io_destroy, 244, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_exit_group
++TRACE_SYSCALL_TABLE(sys_exit_group, sys_exit_group, 248, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_create
++TRACE_SYSCALL_TABLE(sys_epoll_create, sys_epoll_create, 250, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_remap_file_pages
++TRACE_SYSCALL_TABLE(sys_remap_file_pages, sys_remap_file_pages, 253, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_getoverrun
++TRACE_SYSCALL_TABLE(sys_timer_getoverrun, sys_timer_getoverrun, 260, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_delete
++TRACE_SYSCALL_TABLE(sys_timer_delete, sys_timer_delete, 261, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_tgkill
++TRACE_SYSCALL_TABLE(sys_tgkill, sys_tgkill, 268, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_socket
++TRACE_SYSCALL_TABLE(sys_socket, sys_socket, 281, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_listen
++TRACE_SYSCALL_TABLE(sys_listen, sys_listen, 284, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_shutdown
++TRACE_SYSCALL_TABLE(sys_shutdown, sys_shutdown, 293, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_semget
++TRACE_SYSCALL_TABLE(sys_semget, sys_semget, 299, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_msgget
++TRACE_SYSCALL_TABLE(sys_msgget, sys_msgget, 303, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_shmget
++TRACE_SYSCALL_TABLE(sys_shmget, sys_shmget, 307, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_keyctl
++TRACE_SYSCALL_TABLE(sys_keyctl, sys_keyctl, 311, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ioprio_set
++TRACE_SYSCALL_TABLE(sys_ioprio_set, sys_ioprio_set, 314, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ioprio_get
++TRACE_SYSCALL_TABLE(sys_ioprio_get, sys_ioprio_get, 315, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_rm_watch
++TRACE_SYSCALL_TABLE(sys_inotify_rm_watch, sys_inotify_rm_watch, 318, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_unshare
++TRACE_SYSCALL_TABLE(sys_unshare, sys_unshare, 337, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_tee
++TRACE_SYSCALL_TABLE(sys_tee, sys_tee, 342, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timerfd_create
++TRACE_SYSCALL_TABLE(sys_timerfd_create, sys_timerfd_create, 350, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_eventfd
++TRACE_SYSCALL_TABLE(sys_eventfd, sys_eventfd, 351, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_eventfd2
++TRACE_SYSCALL_TABLE(sys_eventfd2, sys_eventfd2, 356, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_create1
++TRACE_SYSCALL_TABLE(sys_epoll_create1, sys_epoll_create1, 357, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup3
++TRACE_SYSCALL_TABLE(sys_dup3, sys_dup3, 358, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_init1
++TRACE_SYSCALL_TABLE(sys_inotify_init1, sys_inotify_init1, 360, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fanotify_init
++TRACE_SYSCALL_TABLE(sys_fanotify_init, sys_fanotify_init, 367, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_syncfs
++TRACE_SYSCALL_TABLE(sys_syncfs, sys_syncfs, 373, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setns
++TRACE_SYSCALL_TABLE(sys_setns, sys_setns, 375, 2)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/arm-32-syscalls-3.4.25_integers_override.h
+@@ -0,0 +1,52 @@
++
++
++#define OVERRIDE_TABLE_32_sys_arm_fadvise64_64
++#define OVERRIDE_TABLE_32_sys_sync_file_range2
++
++#ifndef CREATE_SYSCALL_TABLE
++
++SC_TRACE_EVENT(sys_arm_fadvise64_64,
++ TP_PROTO(int fd, int advice, loff_t offset, loff_t len),
++ TP_ARGS(fd, advice, offset, len),
++ TP_STRUCT__entry(
++ __field_hex(int, fd)
++ __field_hex(int, advice)
++ __field_hex(loff_t, offset)
++ __field_hex(loff_t, len)),
++ TP_fast_assign(
++ tp_assign(fd, fd)
++ tp_assign(advice, advice)
++ tp_assign(offset, offset)
++ tp_assign(len, len)),
++ TP_printk()
++)
++
++SC_TRACE_EVENT(sys_sync_file_range2,
++ TP_PROTO(int fd, loff_t offset, loff_t nbytes, unsigned int flags),
++ TP_ARGS(fd, offset, nbytes, flags),
++ TP_STRUCT__entry(
++ __field_hex(int, fd)
++ __field_hex(loff_t, offset)
++ __field_hex(loff_t, nbytes)
++ __field_hex(unsigned int, flags)),
++ TP_fast_assign(
++ tp_assign(fd, fd)
++ tp_assign(offset, offset)
++ tp_assign(nbytes, nbytes)
++ tp_assign(flags, flags)),
++ TP_printk()
++)
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#define OVERRIDE_TABLE_32_sys_mmap
++TRACE_SYSCALL_TABLE(sys_mmap, sys_mmap, 90, 6)
++
++#define OVERRIDE_TABLE_32_sys_arm_fadvise64_64
++TRACE_SYSCALL_TABLE(sys_arm_fadvise64_64, sys_arm_fadvise64_64, 270, 4)
++#define OVERRIDE_TABLE_32_sys_sync_file_range2
++TRACE_SYSCALL_TABLE(sys_sync_file_range2, sys_sync_file_range2, 341, 4)
++
++#endif /* CREATE_SYSCALL_TABLE */
++
++
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/arm-32-syscalls-3.4.25_pointers.h
+@@ -0,0 +1,2316 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_POINTERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_POINTERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "arm-32-syscalls-3.4.25_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_32_sys_unlink
++SC_TRACE_EVENT(sys_unlink,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chdir
++SC_TRACE_EVENT(sys_chdir,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rmdir
++SC_TRACE_EVENT(sys_rmdir,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pipe
++SC_TRACE_EVENT(sys_pipe,
++ TP_PROTO(int * fildes),
++ TP_ARGS(fildes),
++ TP_STRUCT__entry(__field_hex(int *, fildes)),
++ TP_fast_assign(tp_assign(fildes, fildes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_times
++SC_TRACE_EVENT(sys_times,
++ TP_PROTO(struct tms * tbuf),
++ TP_ARGS(tbuf),
++ TP_STRUCT__entry(__field_hex(struct tms *, tbuf)),
++ TP_fast_assign(tp_assign(tbuf, tbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_acct
++SC_TRACE_EVENT(sys_acct,
++ TP_PROTO(const char * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chroot
++SC_TRACE_EVENT(sys_chroot,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sigpending
++SC_TRACE_EVENT(sys_sigpending,
++ TP_PROTO(old_sigset_t * set),
++ TP_ARGS(set),
++ TP_STRUCT__entry(__field_hex(old_sigset_t *, set)),
++ TP_fast_assign(tp_assign(set, set)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_uselib
++SC_TRACE_EVENT(sys_uselib,
++ TP_PROTO(const char * library),
++ TP_ARGS(library),
++ TP_STRUCT__entry(__field_hex(const char *, library)),
++ TP_fast_assign(tp_assign(library, library)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_swapoff
++SC_TRACE_EVENT(sys_swapoff,
++ TP_PROTO(const char * specialfile),
++ TP_ARGS(specialfile),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysinfo
++SC_TRACE_EVENT(sys_sysinfo,
++ TP_PROTO(struct sysinfo * info),
++ TP_ARGS(info),
++ TP_STRUCT__entry(__field_hex(struct sysinfo *, info)),
++ TP_fast_assign(tp_assign(info, info)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newuname
++SC_TRACE_EVENT(sys_newuname,
++ TP_PROTO(struct new_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct new_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_adjtimex
++SC_TRACE_EVENT(sys_adjtimex,
++ TP_PROTO(struct timex * txc_p),
++ TP_ARGS(txc_p),
++ TP_STRUCT__entry(__field_hex(struct timex *, txc_p)),
++ TP_fast_assign(tp_assign(txc_p, txc_p)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysctl
++SC_TRACE_EVENT(sys_sysctl,
++ TP_PROTO(struct __sysctl_args * args),
++ TP_ARGS(args),
++ TP_STRUCT__entry(__field_hex(struct __sysctl_args *, args)),
++ TP_fast_assign(tp_assign(args, args)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_set_tid_address
++SC_TRACE_EVENT(sys_set_tid_address,
++ TP_PROTO(int * tidptr),
++ TP_ARGS(tidptr),
++ TP_STRUCT__entry(__field_hex(int *, tidptr)),
++ TP_fast_assign(tp_assign(tidptr, tidptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_unlink
++SC_TRACE_EVENT(sys_mq_unlink,
++ TP_PROTO(const char * u_name),
++ TP_ARGS(u_name),
++ TP_STRUCT__entry(__string_from_user(u_name, u_name)),
++ TP_fast_assign(tp_copy_string_from_user(u_name, u_name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_shmdt
++SC_TRACE_EVENT(sys_shmdt,
++ TP_PROTO(char * shmaddr),
++ TP_ARGS(shmaddr),
++ TP_STRUCT__entry(__field_hex(char *, shmaddr)),
++ TP_fast_assign(tp_assign(shmaddr, shmaddr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_creat
++SC_TRACE_EVENT(sys_creat,
++ TP_PROTO(const char * pathname, umode_t mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_link
++SC_TRACE_EVENT(sys_link,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chmod
++SC_TRACE_EVENT(sys_chmod,
++ TP_PROTO(const char * filename, umode_t mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_access
++SC_TRACE_EVENT(sys_access,
++ TP_PROTO(const char * filename, int mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rename
++SC_TRACE_EVENT(sys_rename,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mkdir
++SC_TRACE_EVENT(sys_mkdir,
++ TP_PROTO(const char * pathname, umode_t mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_umount
++SC_TRACE_EVENT(sys_umount,
++ TP_PROTO(char * name, int flags),
++ TP_ARGS(name, flags),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ustat
++SC_TRACE_EVENT(sys_ustat,
++ TP_PROTO(unsigned dev, struct ustat * ubuf),
++ TP_ARGS(dev, ubuf),
++ TP_STRUCT__entry(__field(unsigned, dev) __field_hex(struct ustat *, ubuf)),
++ TP_fast_assign(tp_assign(dev, dev) tp_assign(ubuf, ubuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sethostname
++SC_TRACE_EVENT(sys_sethostname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setrlimit
++SC_TRACE_EVENT(sys_setrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getrusage
++SC_TRACE_EVENT(sys_getrusage,
++ TP_PROTO(int who, struct rusage * ru),
++ TP_ARGS(who, ru),
++ TP_STRUCT__entry(__field(int, who) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(who, who) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_gettimeofday
++SC_TRACE_EVENT(sys_gettimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_settimeofday
++SC_TRACE_EVENT(sys_settimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getgroups16
++SC_TRACE_EVENT(sys_getgroups16,
++ TP_PROTO(int gidsetsize, old_gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(old_gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgroups16
++SC_TRACE_EVENT(sys_setgroups16,
++ TP_PROTO(int gidsetsize, old_gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(old_gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_symlink
++SC_TRACE_EVENT(sys_symlink,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_swapon
++SC_TRACE_EVENT(sys_swapon,
++ TP_PROTO(const char * specialfile, int swap_flags),
++ TP_ARGS(specialfile, swap_flags),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile) __field(int, swap_flags)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile) tp_assign(swap_flags, swap_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_truncate
++SC_TRACE_EVENT(sys_truncate,
++ TP_PROTO(const char * path, long length),
++ TP_ARGS(path, length),
++ TP_STRUCT__entry(__string_from_user(path, path) __field(long, length)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_statfs
++SC_TRACE_EVENT(sys_statfs,
++ TP_PROTO(const char * pathname, struct statfs * buf),
++ TP_ARGS(pathname, buf),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstatfs
++SC_TRACE_EVENT(sys_fstatfs,
++ TP_PROTO(unsigned int fd, struct statfs * buf),
++ TP_ARGS(fd, buf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getitimer
++SC_TRACE_EVENT(sys_getitimer,
++ TP_PROTO(int which, struct itimerval * value),
++ TP_ARGS(which, value),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newstat
++SC_TRACE_EVENT(sys_newstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newlstat
++SC_TRACE_EVENT(sys_newlstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newfstat
++SC_TRACE_EVENT(sys_newfstat,
++ TP_PROTO(unsigned int fd, struct stat * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setdomainname
++SC_TRACE_EVENT(sys_setdomainname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_delete_module
++SC_TRACE_EVENT(sys_delete_module,
++ TP_PROTO(const char * name_user, unsigned int flags),
++ TP_ARGS(name_user, flags),
++ TP_STRUCT__entry(__string_from_user(name_user, name_user) __field(unsigned int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name_user, name_user) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setparam
++SC_TRACE_EVENT(sys_sched_setparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getparam
++SC_TRACE_EVENT(sys_sched_getparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_rr_get_interval
++SC_TRACE_EVENT(sys_sched_rr_get_interval,
++ TP_PROTO(pid_t pid, struct timespec * interval),
++ TP_ARGS(pid, interval),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct timespec *, interval)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_nanosleep
++SC_TRACE_EVENT(sys_nanosleep,
++ TP_PROTO(struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(rqtp, rmtp),
++ TP_STRUCT__entry(__field_hex(struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigpending
++SC_TRACE_EVENT(sys_rt_sigpending,
++ TP_PROTO(sigset_t * set, size_t sigsetsize),
++ TP_ARGS(set, sigsetsize),
++ TP_STRUCT__entry(__field_hex(sigset_t *, set) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(set, set) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigsuspend
++SC_TRACE_EVENT(sys_rt_sigsuspend,
++ TP_PROTO(sigset_t * unewset, size_t sigsetsize),
++ TP_ARGS(unewset, sigsetsize),
++ TP_STRUCT__entry(__field_hex(sigset_t *, unewset) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(unewset, unewset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getcwd
++SC_TRACE_EVENT(sys_getcwd,
++ TP_PROTO(char * buf, unsigned long size),
++ TP_ARGS(buf, size),
++ TP_STRUCT__entry(__field_hex(char *, buf) __field(unsigned long, size)),
++ TP_fast_assign(tp_assign(buf, buf) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getrlimit
++SC_TRACE_EVENT(sys_getrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_stat64
++SC_TRACE_EVENT(sys_stat64,
++ TP_PROTO(const char * filename, struct stat64 * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lstat64
++SC_TRACE_EVENT(sys_lstat64,
++ TP_PROTO(const char * filename, struct stat64 * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstat64
++SC_TRACE_EVENT(sys_fstat64,
++ TP_PROTO(unsigned long fd, struct stat64 * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(struct stat64 *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getgroups
++SC_TRACE_EVENT(sys_getgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgroups
++SC_TRACE_EVENT(sys_setgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pivot_root
++SC_TRACE_EVENT(sys_pivot_root,
++ TP_PROTO(const char * new_root, const char * put_old),
++ TP_ARGS(new_root, put_old),
++ TP_STRUCT__entry(__string_from_user(new_root, new_root) __string_from_user(put_old, put_old)),
++ TP_fast_assign(tp_copy_string_from_user(new_root, new_root) tp_copy_string_from_user(put_old, put_old)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_removexattr
++SC_TRACE_EVENT(sys_removexattr,
++ TP_PROTO(const char * pathname, const char * name),
++ TP_ARGS(pathname, name),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lremovexattr
++SC_TRACE_EVENT(sys_lremovexattr,
++ TP_PROTO(const char * pathname, const char * name),
++ TP_ARGS(pathname, name),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fremovexattr
++SC_TRACE_EVENT(sys_fremovexattr,
++ TP_PROTO(int fd, const char * name),
++ TP_ARGS(fd, name),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_setup
++SC_TRACE_EVENT(sys_io_setup,
++ TP_PROTO(unsigned nr_events, aio_context_t * ctxp),
++ TP_ARGS(nr_events, ctxp),
++ TP_STRUCT__entry(__field(unsigned, nr_events) __field_hex(aio_context_t *, ctxp)),
++ TP_fast_assign(tp_assign(nr_events, nr_events) tp_assign(ctxp, ctxp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_gettime
++SC_TRACE_EVENT(sys_timer_gettime,
++ TP_PROTO(timer_t timer_id, struct itimerspec * setting),
++ TP_ARGS(timer_id, setting),
++ TP_STRUCT__entry(__field(timer_t, timer_id) __field_hex(struct itimerspec *, setting)),
++ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(setting, setting)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_settime
++SC_TRACE_EVENT(sys_clock_settime,
++ TP_PROTO(const clockid_t which_clock, const struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(const struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_gettime
++SC_TRACE_EVENT(sys_clock_gettime,
++ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_getres
++SC_TRACE_EVENT(sys_clock_getres,
++ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_utimes
++SC_TRACE_EVENT(sys_utimes,
++ TP_PROTO(char * filename, struct timeval * utimes),
++ TP_ARGS(filename, utimes),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_notify
++SC_TRACE_EVENT(sys_mq_notify,
++ TP_PROTO(mqd_t mqdes, const struct sigevent * u_notification),
++ TP_ARGS(mqdes, u_notification),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct sigevent *, u_notification)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_notification, u_notification)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_set_robust_list
++SC_TRACE_EVENT(sys_set_robust_list,
++ TP_PROTO(struct robust_list_head * head, size_t len),
++ TP_ARGS(head, len),
++ TP_STRUCT__entry(__field_hex(struct robust_list_head *, head) __field(size_t, len)),
++ TP_fast_assign(tp_assign(head, head) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timerfd_gettime
++SC_TRACE_EVENT(sys_timerfd_gettime,
++ TP_PROTO(int ufd, struct itimerspec * otmr),
++ TP_ARGS(ufd, otmr),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(struct itimerspec *, otmr)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(otmr, otmr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pipe2
++SC_TRACE_EVENT(sys_pipe2,
++ TP_PROTO(int * fildes, int flags),
++ TP_ARGS(fildes, flags),
++ TP_STRUCT__entry(__field_hex(int *, fildes) __field(int, flags)),
++ TP_fast_assign(tp_assign(fildes, fildes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_adjtime
++SC_TRACE_EVENT(sys_clock_adjtime,
++ TP_PROTO(const clockid_t which_clock, struct timex * utx),
++ TP_ARGS(which_clock, utx),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timex *, utx)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(utx, utx)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_read
++SC_TRACE_EVENT(sys_read,
++ TP_PROTO(unsigned int fd, char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_write
++SC_TRACE_EVENT(sys_write,
++ TP_PROTO(unsigned int fd, const char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_open
++SC_TRACE_EVENT(sys_open,
++ TP_PROTO(const char * filename, int flags, umode_t mode),
++ TP_ARGS(filename, flags, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, flags) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mknod
++SC_TRACE_EVENT(sys_mknod,
++ TP_PROTO(const char * filename, umode_t mode, unsigned dev),
++ TP_ARGS(filename, mode, dev),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(umode_t, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lchown16
++SC_TRACE_EVENT(sys_lchown16,
++ TP_PROTO(const char * filename, old_uid_t user, old_gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(old_uid_t, user) __field(old_gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readlink
++SC_TRACE_EVENT(sys_readlink,
++ TP_PROTO(const char * path, char * buf, int bufsiz),
++ TP_ARGS(path, buf, bufsiz),
++ TP_STRUCT__entry(__string_from_user(path, path) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_syslog
++SC_TRACE_EVENT(sys_syslog,
++ TP_PROTO(int type, char * buf, int len),
++ TP_ARGS(type, buf, len),
++ TP_STRUCT__entry(__field(int, type) __field_hex(char *, buf) __field(int, len)),
++ TP_fast_assign(tp_assign(type, type) tp_assign(buf, buf) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setitimer
++SC_TRACE_EVENT(sys_setitimer,
++ TP_PROTO(int which, struct itimerval * value, struct itimerval * ovalue),
++ TP_ARGS(which, value, ovalue),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value) __field_hex(struct itimerval *, ovalue)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value) tp_assign(ovalue, ovalue)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sigprocmask
++SC_TRACE_EVENT(sys_sigprocmask,
++ TP_PROTO(int how, old_sigset_t * nset, old_sigset_t * oset),
++ TP_ARGS(how, nset, oset),
++ TP_STRUCT__entry(__field(int, how) __field_hex(old_sigset_t *, nset) __field_hex(old_sigset_t *, oset)),
++ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_init_module
++SC_TRACE_EVENT(sys_init_module,
++ TP_PROTO(void * umod, unsigned long len, const char * uargs),
++ TP_ARGS(umod, len, uargs),
++ TP_STRUCT__entry(__field_hex(void *, umod) __field(unsigned long, len) __field_hex(const char *, uargs)),
++ TP_fast_assign(tp_assign(umod, umod) tp_assign(len, len) tp_assign(uargs, uargs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getdents
++SC_TRACE_EVENT(sys_getdents,
++ TP_PROTO(unsigned int fd, struct linux_dirent * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readv
++SC_TRACE_EVENT(sys_readv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_writev
++SC_TRACE_EVENT(sys_writev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setscheduler
++SC_TRACE_EVENT(sys_sched_setscheduler,
++ TP_PROTO(pid_t pid, int policy, struct sched_param * param),
++ TP_ARGS(pid, policy, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, policy) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(policy, policy) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getresuid16
++SC_TRACE_EVENT(sys_getresuid16,
++ TP_PROTO(old_uid_t * ruid, old_uid_t * euid, old_uid_t * suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field_hex(old_uid_t *, ruid) __field_hex(old_uid_t *, euid) __field_hex(old_uid_t *, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_poll
++SC_TRACE_EVENT(sys_poll,
++ TP_PROTO(struct pollfd * ufds, unsigned int nfds, int timeout_msecs),
++ TP_ARGS(ufds, nfds, timeout_msecs),
++ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field(int, timeout_msecs)),
++ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(timeout_msecs, timeout_msecs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getresgid16
++SC_TRACE_EVENT(sys_getresgid16,
++ TP_PROTO(old_gid_t * rgid, old_gid_t * egid, old_gid_t * sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field_hex(old_gid_t *, rgid) __field_hex(old_gid_t *, egid) __field_hex(old_gid_t *, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigqueueinfo
++SC_TRACE_EVENT(sys_rt_sigqueueinfo,
++ TP_PROTO(pid_t pid, int sig, siginfo_t * uinfo),
++ TP_ARGS(pid, sig, uinfo),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chown16
++SC_TRACE_EVENT(sys_chown16,
++ TP_PROTO(const char * filename, old_uid_t user, old_gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(old_uid_t, user) __field(old_gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lchown
++SC_TRACE_EVENT(sys_lchown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getresuid
++SC_TRACE_EVENT(sys_getresuid,
++ TP_PROTO(uid_t * ruid, uid_t * euid, uid_t * suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field_hex(uid_t *, ruid) __field_hex(uid_t *, euid) __field_hex(uid_t *, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getresgid
++SC_TRACE_EVENT(sys_getresgid,
++ TP_PROTO(gid_t * rgid, gid_t * egid, gid_t * sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field_hex(gid_t *, rgid) __field_hex(gid_t *, egid) __field_hex(gid_t *, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chown
++SC_TRACE_EVENT(sys_chown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getdents64
++SC_TRACE_EVENT(sys_getdents64,
++ TP_PROTO(unsigned int fd, struct linux_dirent64 * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent64 *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mincore
++SC_TRACE_EVENT(sys_mincore,
++ TP_PROTO(unsigned long start, size_t len, unsigned char * vec),
++ TP_ARGS(start, len, vec),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field_hex(unsigned char *, vec)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(vec, vec)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_listxattr
++SC_TRACE_EVENT(sys_listxattr,
++ TP_PROTO(const char * pathname, char * list, size_t size),
++ TP_ARGS(pathname, list, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_llistxattr
++SC_TRACE_EVENT(sys_llistxattr,
++ TP_PROTO(const char * pathname, char * list, size_t size),
++ TP_ARGS(pathname, list, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_flistxattr
++SC_TRACE_EVENT(sys_flistxattr,
++ TP_PROTO(int fd, char * list, size_t size),
++ TP_ARGS(fd, list, size),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setaffinity
++SC_TRACE_EVENT(sys_sched_setaffinity,
++ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
++ TP_ARGS(pid, len, user_mask_ptr),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getaffinity
++SC_TRACE_EVENT(sys_sched_getaffinity,
++ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
++ TP_ARGS(pid, len, user_mask_ptr),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_submit
++SC_TRACE_EVENT(sys_io_submit,
++ TP_PROTO(aio_context_t ctx_id, long nr, struct iocb * * iocbpp),
++ TP_ARGS(ctx_id, nr, iocbpp),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, nr) __field_hex(struct iocb * *, iocbpp)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(nr, nr) tp_assign(iocbpp, iocbpp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_cancel
++SC_TRACE_EVENT(sys_io_cancel,
++ TP_PROTO(aio_context_t ctx_id, struct iocb * iocb, struct io_event * result),
++ TP_ARGS(ctx_id, iocb, result),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field_hex(struct iocb *, iocb) __field_hex(struct io_event *, result)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(iocb, iocb) tp_assign(result, result)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_create
++SC_TRACE_EVENT(sys_timer_create,
++ TP_PROTO(const clockid_t which_clock, struct sigevent * timer_event_spec, timer_t * created_timer_id),
++ TP_ARGS(which_clock, timer_event_spec, created_timer_id),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct sigevent *, timer_event_spec) __field_hex(timer_t *, created_timer_id)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(timer_event_spec, timer_event_spec) tp_assign(created_timer_id, created_timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_getsetattr
++SC_TRACE_EVENT(sys_mq_getsetattr,
++ TP_PROTO(mqd_t mqdes, const struct mq_attr * u_mqstat, struct mq_attr * u_omqstat),
++ TP_ARGS(mqdes, u_mqstat, u_omqstat),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct mq_attr *, u_mqstat) __field_hex(struct mq_attr *, u_omqstat)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_mqstat, u_mqstat) tp_assign(u_omqstat, u_omqstat)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_bind
++SC_TRACE_EVENT(sys_bind,
++ TP_PROTO(int fd, struct sockaddr * umyaddr, int addrlen),
++ TP_ARGS(fd, umyaddr, addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, umyaddr) __field_hex(int, addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(umyaddr, umyaddr) tp_assign(addrlen, addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_connect
++SC_TRACE_EVENT(sys_connect,
++ TP_PROTO(int fd, struct sockaddr * uservaddr, int addrlen),
++ TP_ARGS(fd, uservaddr, addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, uservaddr) __field_hex(int, addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(uservaddr, uservaddr) tp_assign(addrlen, addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_accept
++SC_TRACE_EVENT(sys_accept,
++ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen),
++ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getsockname
++SC_TRACE_EVENT(sys_getsockname,
++ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
++ TP_ARGS(fd, usockaddr, usockaddr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpeername
++SC_TRACE_EVENT(sys_getpeername,
++ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
++ TP_ARGS(fd, usockaddr, usockaddr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendmsg
++SC_TRACE_EVENT(sys_sendmsg,
++ TP_PROTO(int fd, struct msghdr * msg, unsigned flags),
++ TP_ARGS(fd, msg, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_recvmsg
++SC_TRACE_EVENT(sys_recvmsg,
++ TP_PROTO(int fd, struct msghdr * msg, unsigned int flags),
++ TP_ARGS(fd, msg, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_semop
++SC_TRACE_EVENT(sys_semop,
++ TP_PROTO(int semid, struct sembuf * tsops, unsigned nsops),
++ TP_ARGS(semid, tsops, nsops),
++ TP_STRUCT__entry(__field(int, semid) __field_hex(struct sembuf *, tsops) __field(unsigned, nsops)),
++ TP_fast_assign(tp_assign(semid, semid) tp_assign(tsops, tsops) tp_assign(nsops, nsops)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_msgctl
++SC_TRACE_EVENT(sys_msgctl,
++ TP_PROTO(int msqid, int cmd, struct msqid_ds * buf),
++ TP_ARGS(msqid, cmd, buf),
++ TP_STRUCT__entry(__field(int, msqid) __field(int, cmd) __field_hex(struct msqid_ds *, buf)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(cmd, cmd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_shmat
++SC_TRACE_EVENT(sys_shmat,
++ TP_PROTO(int shmid, char * shmaddr, int shmflg),
++ TP_ARGS(shmid, shmaddr, shmflg),
++ TP_STRUCT__entry(__field(int, shmid) __field_hex(char *, shmaddr) __field(int, shmflg)),
++ TP_fast_assign(tp_assign(shmid, shmid) tp_assign(shmaddr, shmaddr) tp_assign(shmflg, shmflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_shmctl
++SC_TRACE_EVENT(sys_shmctl,
++ TP_PROTO(int shmid, int cmd, struct shmid_ds * buf),
++ TP_ARGS(shmid, cmd, buf),
++ TP_STRUCT__entry(__field(int, shmid) __field(int, cmd) __field_hex(struct shmid_ds *, buf)),
++ TP_fast_assign(tp_assign(shmid, shmid) tp_assign(cmd, cmd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_add_watch
++SC_TRACE_EVENT(sys_inotify_add_watch,
++ TP_PROTO(int fd, const char * pathname, u32 mask),
++ TP_ARGS(fd, pathname, mask),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(pathname, pathname) __field(u32, mask)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(pathname, pathname) tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mkdirat
++SC_TRACE_EVENT(sys_mkdirat,
++ TP_PROTO(int dfd, const char * pathname, umode_t mode),
++ TP_ARGS(dfd, pathname, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_futimesat
++SC_TRACE_EVENT(sys_futimesat,
++ TP_PROTO(int dfd, const char * filename, struct timeval * utimes),
++ TP_ARGS(dfd, filename, utimes),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_unlinkat
++SC_TRACE_EVENT(sys_unlinkat,
++ TP_PROTO(int dfd, const char * pathname, int flag),
++ TP_ARGS(dfd, pathname, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_symlinkat
++SC_TRACE_EVENT(sys_symlinkat,
++ TP_PROTO(const char * oldname, int newdfd, const char * newname),
++ TP_ARGS(oldname, newdfd, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchmodat
++SC_TRACE_EVENT(sys_fchmodat,
++ TP_PROTO(int dfd, const char * filename, umode_t mode),
++ TP_ARGS(dfd, filename, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_faccessat
++SC_TRACE_EVENT(sys_faccessat,
++ TP_PROTO(int dfd, const char * filename, int mode),
++ TP_ARGS(dfd, filename, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_get_robust_list
++SC_TRACE_EVENT(sys_get_robust_list,
++ TP_PROTO(int pid, struct robust_list_head * * head_ptr, size_t * len_ptr),
++ TP_ARGS(pid, head_ptr, len_ptr),
++ TP_STRUCT__entry(__field(int, pid) __field_hex(struct robust_list_head * *, head_ptr) __field_hex(size_t *, len_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(head_ptr, head_ptr) tp_assign(len_ptr, len_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getcpu
++SC_TRACE_EVENT(sys_getcpu,
++ TP_PROTO(unsigned * cpup, unsigned * nodep, struct getcpu_cache * unused),
++ TP_ARGS(cpup, nodep, unused),
++ TP_STRUCT__entry(__field_hex(unsigned *, cpup) __field_hex(unsigned *, nodep) __field_hex(struct getcpu_cache *, unused)),
++ TP_fast_assign(tp_assign(cpup, cpup) tp_assign(nodep, nodep) tp_assign(unused, unused)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_signalfd
++SC_TRACE_EVENT(sys_signalfd,
++ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask),
++ TP_ARGS(ufd, user_mask, sizemask),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_open_by_handle_at
++SC_TRACE_EVENT(sys_open_by_handle_at,
++ TP_PROTO(int mountdirfd, struct file_handle * handle, int flags),
++ TP_ARGS(mountdirfd, handle, flags),
++ TP_STRUCT__entry(__field(int, mountdirfd) __field_hex(struct file_handle *, handle) __field(int, flags)),
++ TP_fast_assign(tp_assign(mountdirfd, mountdirfd) tp_assign(handle, handle) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_reboot
++SC_TRACE_EVENT(sys_reboot,
++ TP_PROTO(int magic1, int magic2, unsigned int cmd, void * arg),
++ TP_ARGS(magic1, magic2, cmd, arg),
++ TP_STRUCT__entry(__field(int, magic1) __field(int, magic2) __field(unsigned int, cmd) __field_hex(void *, arg)),
++ TP_fast_assign(tp_assign(magic1, magic1) tp_assign(magic2, magic2) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_wait4
++SC_TRACE_EVENT(sys_wait4,
++ TP_PROTO(pid_t upid, int * stat_addr, int options, struct rusage * ru),
++ TP_ARGS(upid, stat_addr, options, ru),
++ TP_STRUCT__entry(__field(pid_t, upid) __field_hex(int *, stat_addr) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(upid, upid) tp_assign(stat_addr, stat_addr) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_quotactl
++SC_TRACE_EVENT(sys_quotactl,
++ TP_PROTO(unsigned int cmd, const char * special, qid_t id, void * addr),
++ TP_ARGS(cmd, special, id, addr),
++ TP_STRUCT__entry(__field(unsigned int, cmd) __field_hex(const char *, special) __field(qid_t, id) __field_hex(void *, addr)),
++ TP_fast_assign(tp_assign(cmd, cmd) tp_assign(special, special) tp_assign(id, id) tp_assign(addr, addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigaction
++SC_TRACE_EVENT(sys_rt_sigaction,
++ TP_PROTO(int sig, const struct sigaction * act, struct sigaction * oact, size_t sigsetsize),
++ TP_ARGS(sig, act, oact, sigsetsize),
++ TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction *, act) __field_hex(struct sigaction *, oact) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigprocmask
++SC_TRACE_EVENT(sys_rt_sigprocmask,
++ TP_PROTO(int how, sigset_t * nset, sigset_t * oset, size_t sigsetsize),
++ TP_ARGS(how, nset, oset, sigsetsize),
++ TP_STRUCT__entry(__field(int, how) __field_hex(sigset_t *, nset) __field_hex(sigset_t *, oset) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigtimedwait
++SC_TRACE_EVENT(sys_rt_sigtimedwait,
++ TP_PROTO(const sigset_t * uthese, siginfo_t * uinfo, const struct timespec * uts, size_t sigsetsize),
++ TP_ARGS(uthese, uinfo, uts, sigsetsize),
++ TP_STRUCT__entry(__field_hex(const sigset_t *, uthese) __field_hex(siginfo_t *, uinfo) __field_hex(const struct timespec *, uts) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(uthese, uthese) tp_assign(uinfo, uinfo) tp_assign(uts, uts) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendfile
++SC_TRACE_EVENT(sys_sendfile,
++ TP_PROTO(int out_fd, int in_fd, off_t * offset, size_t count),
++ TP_ARGS(out_fd, in_fd, offset, count),
++ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(off_t *, offset) __field(size_t, count)),
++ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getxattr
++SC_TRACE_EVENT(sys_getxattr,
++ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
++ TP_ARGS(pathname, name, value, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lgetxattr
++SC_TRACE_EVENT(sys_lgetxattr,
++ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
++ TP_ARGS(pathname, name, value, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fgetxattr
++SC_TRACE_EVENT(sys_fgetxattr,
++ TP_PROTO(int fd, const char * name, void * value, size_t size),
++ TP_ARGS(fd, name, value, size),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendfile64
++SC_TRACE_EVENT(sys_sendfile64,
++ TP_PROTO(int out_fd, int in_fd, loff_t * offset, size_t count),
++ TP_ARGS(out_fd, in_fd, offset, count),
++ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(loff_t *, offset) __field(size_t, count)),
++ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_ctl
++SC_TRACE_EVENT(sys_epoll_ctl,
++ TP_PROTO(int epfd, int op, int fd, struct epoll_event * event),
++ TP_ARGS(epfd, op, fd, event),
++ TP_STRUCT__entry(__field(int, epfd) __field(int, op) __field(int, fd) __field_hex(struct epoll_event *, event)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(op, op) tp_assign(fd, fd) tp_assign(event, event)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_wait
++SC_TRACE_EVENT(sys_epoll_wait,
++ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout),
++ TP_ARGS(epfd, events, maxevents, timeout),
++ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_settime
++SC_TRACE_EVENT(sys_timer_settime,
++ TP_PROTO(timer_t timer_id, int flags, const struct itimerspec * new_setting, struct itimerspec * old_setting),
++ TP_ARGS(timer_id, flags, new_setting, old_setting),
++ TP_STRUCT__entry(__field(timer_t, timer_id) __field(int, flags) __field_hex(const struct itimerspec *, new_setting) __field_hex(struct itimerspec *, old_setting)),
++ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(flags, flags) tp_assign(new_setting, new_setting) tp_assign(old_setting, old_setting)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_nanosleep
++SC_TRACE_EVENT(sys_clock_nanosleep,
++ TP_PROTO(const clockid_t which_clock, int flags, const struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(which_clock, flags, rqtp, rmtp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field(int, flags) __field_hex(const struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(flags, flags) tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_open
++SC_TRACE_EVENT(sys_mq_open,
++ TP_PROTO(const char * u_name, int oflag, umode_t mode, struct mq_attr * u_attr),
++ TP_ARGS(u_name, oflag, mode, u_attr),
++ TP_STRUCT__entry(__string_from_user(u_name, u_name) __field(int, oflag) __field(umode_t, mode) __field_hex(struct mq_attr *, u_attr)),
++ TP_fast_assign(tp_copy_string_from_user(u_name, u_name) tp_assign(oflag, oflag) tp_assign(mode, mode) tp_assign(u_attr, u_attr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_socketpair
++SC_TRACE_EVENT(sys_socketpair,
++ TP_PROTO(int family, int type, int protocol, int * usockvec),
++ TP_ARGS(family, type, protocol, usockvec),
++ TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol) __field_hex(int *, usockvec)),
++ TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol) tp_assign(usockvec, usockvec)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_send
++SC_TRACE_EVENT(sys_send,
++ TP_PROTO(int fd, void * buff, size_t len, unsigned flags),
++ TP_ARGS(fd, buff, len, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, buff) __field(size_t, len) __field(unsigned, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buff, buff) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_msgsnd
++SC_TRACE_EVENT(sys_msgsnd,
++ TP_PROTO(int msqid, struct msgbuf * msgp, size_t msgsz, int msgflg),
++ TP_ARGS(msqid, msgp, msgsz, msgflg),
++ TP_STRUCT__entry(__field(int, msqid) __field_hex(struct msgbuf *, msgp) __field(size_t, msgsz) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_request_key
++SC_TRACE_EVENT(sys_request_key,
++ TP_PROTO(const char * _type, const char * _description, const char * _callout_info, key_serial_t destringid),
++ TP_ARGS(_type, _description, _callout_info, destringid),
++ TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const char *, _callout_info) __field(key_serial_t, destringid)),
++ TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_callout_info, _callout_info) tp_assign(destringid, destringid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_semtimedop
++SC_TRACE_EVENT(sys_semtimedop,
++ TP_PROTO(int semid, struct sembuf * tsops, unsigned nsops, const struct timespec * timeout),
++ TP_ARGS(semid, tsops, nsops, timeout),
++ TP_STRUCT__entry(__field(int, semid) __field_hex(struct sembuf *, tsops) __field(unsigned, nsops) __field_hex(const struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(semid, semid) tp_assign(tsops, tsops) tp_assign(nsops, nsops) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_openat
++SC_TRACE_EVENT(sys_openat,
++ TP_PROTO(int dfd, const char * filename, int flags, umode_t mode),
++ TP_ARGS(dfd, filename, flags, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, flags) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mknodat
++SC_TRACE_EVENT(sys_mknodat,
++ TP_PROTO(int dfd, const char * filename, umode_t mode, unsigned dev),
++ TP_ARGS(dfd, filename, mode, dev),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(umode_t, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstatat64
++SC_TRACE_EVENT(sys_fstatat64,
++ TP_PROTO(int dfd, const char * filename, struct stat64 * statbuf, int flag),
++ TP_ARGS(dfd, filename, statbuf, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_renameat
++SC_TRACE_EVENT(sys_renameat,
++ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname),
++ TP_ARGS(olddfd, oldname, newdfd, newname),
++ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readlinkat
++SC_TRACE_EVENT(sys_readlinkat,
++ TP_PROTO(int dfd, const char * pathname, char * buf, int bufsiz),
++ TP_ARGS(dfd, pathname, buf, bufsiz),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_vmsplice
++SC_TRACE_EVENT(sys_vmsplice,
++ TP_PROTO(int fd, const struct iovec * iov, unsigned long nr_segs, unsigned int flags),
++ TP_ARGS(fd, iov, nr_segs, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(const struct iovec *, iov) __field(unsigned long, nr_segs) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(iov, iov) tp_assign(nr_segs, nr_segs) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_utimensat
++SC_TRACE_EVENT(sys_utimensat,
++ TP_PROTO(int dfd, const char * filename, struct timespec * utimes, int flags),
++ TP_ARGS(dfd, filename, utimes, flags),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timespec *, utimes) __field(int, flags)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timerfd_settime
++SC_TRACE_EVENT(sys_timerfd_settime,
++ TP_PROTO(int ufd, int flags, const struct itimerspec * utmr, struct itimerspec * otmr),
++ TP_ARGS(ufd, flags, utmr, otmr),
++ TP_STRUCT__entry(__field(int, ufd) __field(int, flags) __field_hex(const struct itimerspec *, utmr) __field_hex(struct itimerspec *, otmr)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(flags, flags) tp_assign(utmr, utmr) tp_assign(otmr, otmr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_signalfd4
++SC_TRACE_EVENT(sys_signalfd4,
++ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask, int flags),
++ TP_ARGS(ufd, user_mask, sizemask, flags),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask) __field(int, flags)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_tgsigqueueinfo
++SC_TRACE_EVENT(sys_rt_tgsigqueueinfo,
++ TP_PROTO(pid_t tgid, pid_t pid, int sig, siginfo_t * uinfo),
++ TP_ARGS(tgid, pid, sig, uinfo),
++ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_accept4
++SC_TRACE_EVENT(sys_accept4,
++ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen, int flags),
++ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen) __field(int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_prlimit64
++SC_TRACE_EVENT(sys_prlimit64,
++ TP_PROTO(pid_t pid, unsigned int resource, const struct rlimit64 * new_rlim, struct rlimit64 * old_rlim),
++ TP_ARGS(pid, resource, new_rlim, old_rlim),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, resource) __field_hex(const struct rlimit64 *, new_rlim) __field_hex(struct rlimit64 *, old_rlim)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(resource, resource) tp_assign(new_rlim, new_rlim) tp_assign(old_rlim, old_rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendmmsg
++SC_TRACE_EVENT(sys_sendmmsg,
++ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags),
++ TP_ARGS(fd, mmsg, vlen, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mount
++SC_TRACE_EVENT(sys_mount,
++ TP_PROTO(char * dev_name, char * dir_name, char * type, unsigned long flags, void * data),
++ TP_ARGS(dev_name, dir_name, type, flags, data),
++ TP_STRUCT__entry(__string_from_user(dev_name, dev_name) __string_from_user(dir_name, dir_name) __string_from_user(type, type) __field(unsigned long, flags) __field_hex(void *, data)),
++ TP_fast_assign(tp_copy_string_from_user(dev_name, dev_name) tp_copy_string_from_user(dir_name, dir_name) tp_copy_string_from_user(type, type) tp_assign(flags, flags) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_llseek
++SC_TRACE_EVENT(sys_llseek,
++ TP_PROTO(unsigned int fd, unsigned long offset_high, unsigned long offset_low, loff_t * result, unsigned int origin),
++ TP_ARGS(fd, offset_high, offset_low, result, origin),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, offset_high) __field(unsigned long, offset_low) __field_hex(loff_t *, result) __field(unsigned int, origin)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset_high, offset_high) tp_assign(offset_low, offset_low) tp_assign(result, result) tp_assign(origin, origin)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_select
++SC_TRACE_EVENT(sys_select,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timeval * tvp),
++ TP_ARGS(n, inp, outp, exp, tvp),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timeval *, tvp)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tvp, tvp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setxattr
++SC_TRACE_EVENT(sys_setxattr,
++ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(pathname, name, value, size, flags),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lsetxattr
++SC_TRACE_EVENT(sys_lsetxattr,
++ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(pathname, name, value, size, flags),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fsetxattr
++SC_TRACE_EVENT(sys_fsetxattr,
++ TP_PROTO(int fd, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(fd, name, value, size, flags),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_getevents
++SC_TRACE_EVENT(sys_io_getevents,
++ TP_PROTO(aio_context_t ctx_id, long min_nr, long nr, struct io_event * events, struct timespec * timeout),
++ TP_ARGS(ctx_id, min_nr, nr, events, timeout),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, min_nr) __field(long, nr) __field_hex(struct io_event *, events) __field_hex(struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(min_nr, min_nr) tp_assign(nr, nr) tp_assign(events, events) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_timedsend
++SC_TRACE_EVENT(sys_mq_timedsend,
++ TP_PROTO(mqd_t mqdes, const char * u_msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec * u_abs_timeout),
++ TP_ARGS(mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const char *, u_msg_ptr) __field(size_t, msg_len) __field(unsigned int, msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(msg_prio, msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_timedreceive
++SC_TRACE_EVENT(sys_mq_timedreceive,
++ TP_PROTO(mqd_t mqdes, char * u_msg_ptr, size_t msg_len, unsigned int * u_msg_prio, const struct timespec * u_abs_timeout),
++ TP_ARGS(mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(char *, u_msg_ptr) __field(size_t, msg_len) __field_hex(unsigned int *, u_msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(u_msg_prio, u_msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_waitid
++SC_TRACE_EVENT(sys_waitid,
++ TP_PROTO(int which, pid_t upid, struct siginfo * infop, int options, struct rusage * ru),
++ TP_ARGS(which, upid, infop, options, ru),
++ TP_STRUCT__entry(__field(int, which) __field(pid_t, upid) __field_hex(struct siginfo *, infop) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(upid, upid) tp_assign(infop, infop) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setsockopt
++SC_TRACE_EVENT(sys_setsockopt,
++ TP_PROTO(int fd, int level, int optname, char * optval, int optlen),
++ TP_ARGS(fd, level, optname, optval, optlen),
++ TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field(int, optlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getsockopt
++SC_TRACE_EVENT(sys_getsockopt,
++ TP_PROTO(int fd, int level, int optname, char * optval, int * optlen),
++ TP_ARGS(fd, level, optname, optval, optlen),
++ TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field_hex(int *, optlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_msgrcv
++SC_TRACE_EVENT(sys_msgrcv,
++ TP_PROTO(int msqid, struct msgbuf * msgp, size_t msgsz, long msgtyp, int msgflg),
++ TP_ARGS(msqid, msgp, msgsz, msgtyp, msgflg),
++ TP_STRUCT__entry(__field(int, msqid) __field_hex(struct msgbuf *, msgp) __field(size_t, msgsz) __field(long, msgtyp) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgtyp, msgtyp) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_add_key
++SC_TRACE_EVENT(sys_add_key,
++ TP_PROTO(const char * _type, const char * _description, const void * _payload, size_t plen, key_serial_t ringid),
++ TP_ARGS(_type, _description, _payload, plen, ringid),
++ TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const void *, _payload) __field(size_t, plen) __field(key_serial_t, ringid)),
++ TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_payload, _payload) tp_assign(plen, plen) tp_assign(ringid, ringid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchownat
++SC_TRACE_EVENT(sys_fchownat,
++ TP_PROTO(int dfd, const char * filename, uid_t user, gid_t group, int flag),
++ TP_ARGS(dfd, filename, user, group, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_linkat
++SC_TRACE_EVENT(sys_linkat,
++ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname, int flags),
++ TP_ARGS(olddfd, oldname, newdfd, newname, flags),
++ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname) __field(int, flags)),
++ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ppoll
++SC_TRACE_EVENT(sys_ppoll,
++ TP_PROTO(struct pollfd * ufds, unsigned int nfds, struct timespec * tsp, const sigset_t * sigmask, size_t sigsetsize),
++ TP_ARGS(ufds, nfds, tsp, sigmask, sigsetsize),
++ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field_hex(struct timespec *, tsp) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(tsp, tsp) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_preadv
++SC_TRACE_EVENT(sys_preadv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
++ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pwritev
++SC_TRACE_EVENT(sys_pwritev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
++ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_perf_event_open
++SC_TRACE_EVENT(sys_perf_event_open,
++ TP_PROTO(struct perf_event_attr * attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags),
++ TP_ARGS(attr_uptr, pid, cpu, group_fd, flags),
++ TP_STRUCT__entry(__field_hex(struct perf_event_attr *, attr_uptr) __field(pid_t, pid) __field(int, cpu) __field(int, group_fd) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(attr_uptr, attr_uptr) tp_assign(pid, pid) tp_assign(cpu, cpu) tp_assign(group_fd, group_fd) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_recvmmsg
++SC_TRACE_EVENT(sys_recvmmsg,
++ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags, struct timespec * timeout),
++ TP_ARGS(fd, mmsg, vlen, flags, timeout),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags) __field_hex(struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_name_to_handle_at
++SC_TRACE_EVENT(sys_name_to_handle_at,
++ TP_PROTO(int dfd, const char * name, struct file_handle * handle, int * mnt_id, int flag),
++ TP_ARGS(dfd, name, handle, mnt_id, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(name, name) __field_hex(struct file_handle *, handle) __field_hex(int *, mnt_id) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(name, name) tp_assign(handle, handle) tp_assign(mnt_id, mnt_id) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_futex
++SC_TRACE_EVENT(sys_futex,
++ TP_PROTO(u32 * uaddr, int op, u32 val, struct timespec * utime, u32 * uaddr2, u32 val3),
++ TP_ARGS(uaddr, op, val, utime, uaddr2, val3),
++ TP_STRUCT__entry(__field_hex(u32 *, uaddr) __field(int, op) __field(u32, val) __field_hex(struct timespec *, utime) __field_hex(u32 *, uaddr2) __field(u32, val3)),
++ TP_fast_assign(tp_assign(uaddr, uaddr) tp_assign(op, op) tp_assign(val, val) tp_assign(utime, utime) tp_assign(uaddr2, uaddr2) tp_assign(val3, val3)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendto
++SC_TRACE_EVENT(sys_sendto,
++ TP_PROTO(int fd, void * buff, size_t len, unsigned flags, struct sockaddr * addr, int addr_len),
++ TP_ARGS(fd, buff, len, flags, addr, addr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, buff) __field(size_t, len) __field(unsigned, flags) __field_hex(struct sockaddr *, addr) __field_hex(int, addr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buff, buff) tp_assign(len, len) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_recvfrom
++SC_TRACE_EVENT(sys_recvfrom,
++ TP_PROTO(int fd, void * ubuf, size_t size, unsigned flags, struct sockaddr * addr, int * addr_len),
++ TP_ARGS(fd, ubuf, size, flags, addr, addr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, ubuf) __field(size_t, size) __field(unsigned, flags) __field_hex(struct sockaddr *, addr) __field_hex(int *, addr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(ubuf, ubuf) tp_assign(size, size) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pselect6
++SC_TRACE_EVENT(sys_pselect6,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timespec * tsp, void * sig),
++ TP_ARGS(n, inp, outp, exp, tsp, sig),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timespec *, tsp) __field_hex(void *, sig)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tsp, tsp) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_splice
++SC_TRACE_EVENT(sys_splice,
++ TP_PROTO(int fd_in, loff_t * off_in, int fd_out, loff_t * off_out, size_t len, unsigned int flags),
++ TP_ARGS(fd_in, off_in, fd_out, off_out, len, flags),
++ TP_STRUCT__entry(__field(int, fd_in) __field_hex(loff_t *, off_in) __field(int, fd_out) __field_hex(loff_t *, off_out) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd_in, fd_in) tp_assign(off_in, off_in) tp_assign(fd_out, fd_out) tp_assign(off_out, off_out) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_pwait
++SC_TRACE_EVENT(sys_epoll_pwait,
++ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout, const sigset_t * sigmask, size_t sigsetsize),
++ TP_ARGS(epfd, events, maxevents, timeout, sigmask, sigsetsize),
++ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_process_vm_readv
++SC_TRACE_EVENT(sys_process_vm_readv,
++ TP_PROTO(pid_t pid, const struct iovec * lvec, unsigned long liovcnt, const struct iovec * rvec, unsigned long riovcnt, unsigned long flags),
++ TP_ARGS(pid, lvec, liovcnt, rvec, riovcnt, flags),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(const struct iovec *, lvec) __field(unsigned long, liovcnt) __field_hex(const struct iovec *, rvec) __field(unsigned long, riovcnt) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(lvec, lvec) tp_assign(liovcnt, liovcnt) tp_assign(rvec, rvec) tp_assign(riovcnt, riovcnt) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_process_vm_writev
++SC_TRACE_EVENT(sys_process_vm_writev,
++ TP_PROTO(pid_t pid, const struct iovec * lvec, unsigned long liovcnt, const struct iovec * rvec, unsigned long riovcnt, unsigned long flags),
++ TP_ARGS(pid, lvec, liovcnt, rvec, riovcnt, flags),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(const struct iovec *, lvec) __field(unsigned long, liovcnt) __field_hex(const struct iovec *, rvec) __field(unsigned long, riovcnt) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(lvec, lvec) tp_assign(liovcnt, liovcnt) tp_assign(rvec, rvec) tp_assign(riovcnt, riovcnt) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_POINTERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "arm-32-syscalls-3.4.25_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_TABLE_32_sys_read
++TRACE_SYSCALL_TABLE(sys_read, sys_read, 3, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_write
++TRACE_SYSCALL_TABLE(sys_write, sys_write, 4, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_open
++TRACE_SYSCALL_TABLE(sys_open, sys_open, 5, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_creat
++TRACE_SYSCALL_TABLE(sys_creat, sys_creat, 8, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_link
++TRACE_SYSCALL_TABLE(sys_link, sys_link, 9, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_unlink
++TRACE_SYSCALL_TABLE(sys_unlink, sys_unlink, 10, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chdir
++TRACE_SYSCALL_TABLE(sys_chdir, sys_chdir, 12, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mknod
++TRACE_SYSCALL_TABLE(sys_mknod, sys_mknod, 14, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chmod
++TRACE_SYSCALL_TABLE(sys_chmod, sys_chmod, 15, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lchown16
++TRACE_SYSCALL_TABLE(sys_lchown16, sys_lchown16, 16, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mount
++TRACE_SYSCALL_TABLE(sys_mount, sys_mount, 21, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_access
++TRACE_SYSCALL_TABLE(sys_access, sys_access, 33, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rename
++TRACE_SYSCALL_TABLE(sys_rename, sys_rename, 38, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mkdir
++TRACE_SYSCALL_TABLE(sys_mkdir, sys_mkdir, 39, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rmdir
++TRACE_SYSCALL_TABLE(sys_rmdir, sys_rmdir, 40, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pipe
++TRACE_SYSCALL_TABLE(sys_pipe, sys_pipe, 42, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_times
++TRACE_SYSCALL_TABLE(sys_times, sys_times, 43, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_acct
++TRACE_SYSCALL_TABLE(sys_acct, sys_acct, 51, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_umount
++TRACE_SYSCALL_TABLE(sys_umount, sys_umount, 52, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chroot
++TRACE_SYSCALL_TABLE(sys_chroot, sys_chroot, 61, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ustat
++TRACE_SYSCALL_TABLE(sys_ustat, sys_ustat, 62, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sigpending
++TRACE_SYSCALL_TABLE(sys_sigpending, sys_sigpending, 73, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sethostname
++TRACE_SYSCALL_TABLE(sys_sethostname, sys_sethostname, 74, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setrlimit
++TRACE_SYSCALL_TABLE(sys_setrlimit, sys_setrlimit, 75, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getrusage
++TRACE_SYSCALL_TABLE(sys_getrusage, sys_getrusage, 77, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_gettimeofday
++TRACE_SYSCALL_TABLE(sys_gettimeofday, sys_gettimeofday, 78, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_settimeofday
++TRACE_SYSCALL_TABLE(sys_settimeofday, sys_settimeofday, 79, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgroups16
++TRACE_SYSCALL_TABLE(sys_getgroups16, sys_getgroups16, 80, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgroups16
++TRACE_SYSCALL_TABLE(sys_setgroups16, sys_setgroups16, 81, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_symlink
++TRACE_SYSCALL_TABLE(sys_symlink, sys_symlink, 83, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readlink
++TRACE_SYSCALL_TABLE(sys_readlink, sys_readlink, 85, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_uselib
++TRACE_SYSCALL_TABLE(sys_uselib, sys_uselib, 86, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_swapon
++TRACE_SYSCALL_TABLE(sys_swapon, sys_swapon, 87, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_reboot
++TRACE_SYSCALL_TABLE(sys_reboot, sys_reboot, 88, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_truncate
++TRACE_SYSCALL_TABLE(sys_truncate, sys_truncate, 92, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_statfs
++TRACE_SYSCALL_TABLE(sys_statfs, sys_statfs, 99, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstatfs
++TRACE_SYSCALL_TABLE(sys_fstatfs, sys_fstatfs, 100, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_syslog
++TRACE_SYSCALL_TABLE(sys_syslog, sys_syslog, 103, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setitimer
++TRACE_SYSCALL_TABLE(sys_setitimer, sys_setitimer, 104, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getitimer
++TRACE_SYSCALL_TABLE(sys_getitimer, sys_getitimer, 105, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newstat
++TRACE_SYSCALL_TABLE(sys_newstat, sys_newstat, 106, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newlstat
++TRACE_SYSCALL_TABLE(sys_newlstat, sys_newlstat, 107, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newfstat
++TRACE_SYSCALL_TABLE(sys_newfstat, sys_newfstat, 108, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_wait4
++TRACE_SYSCALL_TABLE(sys_wait4, sys_wait4, 114, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_swapoff
++TRACE_SYSCALL_TABLE(sys_swapoff, sys_swapoff, 115, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysinfo
++TRACE_SYSCALL_TABLE(sys_sysinfo, sys_sysinfo, 116, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setdomainname
++TRACE_SYSCALL_TABLE(sys_setdomainname, sys_setdomainname, 121, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newuname
++TRACE_SYSCALL_TABLE(sys_newuname, sys_newuname, 122, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_adjtimex
++TRACE_SYSCALL_TABLE(sys_adjtimex, sys_adjtimex, 124, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sigprocmask
++TRACE_SYSCALL_TABLE(sys_sigprocmask, sys_sigprocmask, 126, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_init_module
++TRACE_SYSCALL_TABLE(sys_init_module, sys_init_module, 128, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_delete_module
++TRACE_SYSCALL_TABLE(sys_delete_module, sys_delete_module, 129, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_quotactl
++TRACE_SYSCALL_TABLE(sys_quotactl, sys_quotactl, 131, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_llseek
++TRACE_SYSCALL_TABLE(sys_llseek, sys_llseek, 140, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getdents
++TRACE_SYSCALL_TABLE(sys_getdents, sys_getdents, 141, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_select
++TRACE_SYSCALL_TABLE(sys_select, sys_select, 142, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readv
++TRACE_SYSCALL_TABLE(sys_readv, sys_readv, 145, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_writev
++TRACE_SYSCALL_TABLE(sys_writev, sys_writev, 146, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysctl
++TRACE_SYSCALL_TABLE(sys_sysctl, sys_sysctl, 149, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setparam
++TRACE_SYSCALL_TABLE(sys_sched_setparam, sys_sched_setparam, 154, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getparam
++TRACE_SYSCALL_TABLE(sys_sched_getparam, sys_sched_getparam, 155, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setscheduler
++TRACE_SYSCALL_TABLE(sys_sched_setscheduler, sys_sched_setscheduler, 156, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_rr_get_interval
++TRACE_SYSCALL_TABLE(sys_sched_rr_get_interval, sys_sched_rr_get_interval, 161, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_nanosleep
++TRACE_SYSCALL_TABLE(sys_nanosleep, sys_nanosleep, 162, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getresuid16
++TRACE_SYSCALL_TABLE(sys_getresuid16, sys_getresuid16, 165, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_poll
++TRACE_SYSCALL_TABLE(sys_poll, sys_poll, 168, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getresgid16
++TRACE_SYSCALL_TABLE(sys_getresgid16, sys_getresgid16, 171, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigaction
++TRACE_SYSCALL_TABLE(sys_rt_sigaction, sys_rt_sigaction, 174, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigprocmask
++TRACE_SYSCALL_TABLE(sys_rt_sigprocmask, sys_rt_sigprocmask, 175, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigpending
++TRACE_SYSCALL_TABLE(sys_rt_sigpending, sys_rt_sigpending, 176, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigtimedwait
++TRACE_SYSCALL_TABLE(sys_rt_sigtimedwait, sys_rt_sigtimedwait, 177, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigqueueinfo
++TRACE_SYSCALL_TABLE(sys_rt_sigqueueinfo, sys_rt_sigqueueinfo, 178, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigsuspend
++TRACE_SYSCALL_TABLE(sys_rt_sigsuspend, sys_rt_sigsuspend, 179, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chown16
++TRACE_SYSCALL_TABLE(sys_chown16, sys_chown16, 182, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getcwd
++TRACE_SYSCALL_TABLE(sys_getcwd, sys_getcwd, 183, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendfile
++TRACE_SYSCALL_TABLE(sys_sendfile, sys_sendfile, 187, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getrlimit
++TRACE_SYSCALL_TABLE(sys_getrlimit, sys_getrlimit, 191, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_stat64
++TRACE_SYSCALL_TABLE(sys_stat64, sys_stat64, 195, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lstat64
++TRACE_SYSCALL_TABLE(sys_lstat64, sys_lstat64, 196, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstat64
++TRACE_SYSCALL_TABLE(sys_fstat64, sys_fstat64, 197, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lchown
++TRACE_SYSCALL_TABLE(sys_lchown, sys_lchown, 198, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgroups
++TRACE_SYSCALL_TABLE(sys_getgroups, sys_getgroups, 205, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgroups
++TRACE_SYSCALL_TABLE(sys_setgroups, sys_setgroups, 206, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getresuid
++TRACE_SYSCALL_TABLE(sys_getresuid, sys_getresuid, 209, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getresgid
++TRACE_SYSCALL_TABLE(sys_getresgid, sys_getresgid, 211, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chown
++TRACE_SYSCALL_TABLE(sys_chown, sys_chown, 212, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getdents64
++TRACE_SYSCALL_TABLE(sys_getdents64, sys_getdents64, 217, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pivot_root
++TRACE_SYSCALL_TABLE(sys_pivot_root, sys_pivot_root, 218, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mincore
++TRACE_SYSCALL_TABLE(sys_mincore, sys_mincore, 219, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setxattr
++TRACE_SYSCALL_TABLE(sys_setxattr, sys_setxattr, 226, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lsetxattr
++TRACE_SYSCALL_TABLE(sys_lsetxattr, sys_lsetxattr, 227, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fsetxattr
++TRACE_SYSCALL_TABLE(sys_fsetxattr, sys_fsetxattr, 228, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getxattr
++TRACE_SYSCALL_TABLE(sys_getxattr, sys_getxattr, 229, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lgetxattr
++TRACE_SYSCALL_TABLE(sys_lgetxattr, sys_lgetxattr, 230, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fgetxattr
++TRACE_SYSCALL_TABLE(sys_fgetxattr, sys_fgetxattr, 231, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_listxattr
++TRACE_SYSCALL_TABLE(sys_listxattr, sys_listxattr, 232, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_llistxattr
++TRACE_SYSCALL_TABLE(sys_llistxattr, sys_llistxattr, 233, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_flistxattr
++TRACE_SYSCALL_TABLE(sys_flistxattr, sys_flistxattr, 234, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_removexattr
++TRACE_SYSCALL_TABLE(sys_removexattr, sys_removexattr, 235, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lremovexattr
++TRACE_SYSCALL_TABLE(sys_lremovexattr, sys_lremovexattr, 236, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fremovexattr
++TRACE_SYSCALL_TABLE(sys_fremovexattr, sys_fremovexattr, 237, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendfile64
++TRACE_SYSCALL_TABLE(sys_sendfile64, sys_sendfile64, 239, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_futex
++TRACE_SYSCALL_TABLE(sys_futex, sys_futex, 240, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setaffinity
++TRACE_SYSCALL_TABLE(sys_sched_setaffinity, sys_sched_setaffinity, 241, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getaffinity
++TRACE_SYSCALL_TABLE(sys_sched_getaffinity, sys_sched_getaffinity, 242, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_setup
++TRACE_SYSCALL_TABLE(sys_io_setup, sys_io_setup, 243, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_getevents
++TRACE_SYSCALL_TABLE(sys_io_getevents, sys_io_getevents, 245, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_submit
++TRACE_SYSCALL_TABLE(sys_io_submit, sys_io_submit, 246, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_cancel
++TRACE_SYSCALL_TABLE(sys_io_cancel, sys_io_cancel, 247, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_ctl
++TRACE_SYSCALL_TABLE(sys_epoll_ctl, sys_epoll_ctl, 251, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_wait
++TRACE_SYSCALL_TABLE(sys_epoll_wait, sys_epoll_wait, 252, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_set_tid_address
++TRACE_SYSCALL_TABLE(sys_set_tid_address, sys_set_tid_address, 256, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_create
++TRACE_SYSCALL_TABLE(sys_timer_create, sys_timer_create, 257, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_settime
++TRACE_SYSCALL_TABLE(sys_timer_settime, sys_timer_settime, 258, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_gettime
++TRACE_SYSCALL_TABLE(sys_timer_gettime, sys_timer_gettime, 259, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_settime
++TRACE_SYSCALL_TABLE(sys_clock_settime, sys_clock_settime, 262, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_gettime
++TRACE_SYSCALL_TABLE(sys_clock_gettime, sys_clock_gettime, 263, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_getres
++TRACE_SYSCALL_TABLE(sys_clock_getres, sys_clock_getres, 264, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_nanosleep
++TRACE_SYSCALL_TABLE(sys_clock_nanosleep, sys_clock_nanosleep, 265, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_utimes
++TRACE_SYSCALL_TABLE(sys_utimes, sys_utimes, 269, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_open
++TRACE_SYSCALL_TABLE(sys_mq_open, sys_mq_open, 274, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_unlink
++TRACE_SYSCALL_TABLE(sys_mq_unlink, sys_mq_unlink, 275, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_timedsend
++TRACE_SYSCALL_TABLE(sys_mq_timedsend, sys_mq_timedsend, 276, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_timedreceive
++TRACE_SYSCALL_TABLE(sys_mq_timedreceive, sys_mq_timedreceive, 277, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_notify
++TRACE_SYSCALL_TABLE(sys_mq_notify, sys_mq_notify, 278, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_getsetattr
++TRACE_SYSCALL_TABLE(sys_mq_getsetattr, sys_mq_getsetattr, 279, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_waitid
++TRACE_SYSCALL_TABLE(sys_waitid, sys_waitid, 280, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_bind
++TRACE_SYSCALL_TABLE(sys_bind, sys_bind, 282, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_connect
++TRACE_SYSCALL_TABLE(sys_connect, sys_connect, 283, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_accept
++TRACE_SYSCALL_TABLE(sys_accept, sys_accept, 285, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getsockname
++TRACE_SYSCALL_TABLE(sys_getsockname, sys_getsockname, 286, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpeername
++TRACE_SYSCALL_TABLE(sys_getpeername, sys_getpeername, 287, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_socketpair
++TRACE_SYSCALL_TABLE(sys_socketpair, sys_socketpair, 288, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_send
++TRACE_SYSCALL_TABLE(sys_send, sys_send, 289, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendto
++TRACE_SYSCALL_TABLE(sys_sendto, sys_sendto, 290, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_recvfrom
++TRACE_SYSCALL_TABLE(sys_recvfrom, sys_recvfrom, 292, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setsockopt
++TRACE_SYSCALL_TABLE(sys_setsockopt, sys_setsockopt, 294, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getsockopt
++TRACE_SYSCALL_TABLE(sys_getsockopt, sys_getsockopt, 295, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendmsg
++TRACE_SYSCALL_TABLE(sys_sendmsg, sys_sendmsg, 296, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_recvmsg
++TRACE_SYSCALL_TABLE(sys_recvmsg, sys_recvmsg, 297, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_semop
++TRACE_SYSCALL_TABLE(sys_semop, sys_semop, 298, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_msgsnd
++TRACE_SYSCALL_TABLE(sys_msgsnd, sys_msgsnd, 301, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_msgrcv
++TRACE_SYSCALL_TABLE(sys_msgrcv, sys_msgrcv, 302, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_msgctl
++TRACE_SYSCALL_TABLE(sys_msgctl, sys_msgctl, 304, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_shmat
++TRACE_SYSCALL_TABLE(sys_shmat, sys_shmat, 305, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_shmdt
++TRACE_SYSCALL_TABLE(sys_shmdt, sys_shmdt, 306, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_shmctl
++TRACE_SYSCALL_TABLE(sys_shmctl, sys_shmctl, 308, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_add_key
++TRACE_SYSCALL_TABLE(sys_add_key, sys_add_key, 309, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_request_key
++TRACE_SYSCALL_TABLE(sys_request_key, sys_request_key, 310, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_semtimedop
++TRACE_SYSCALL_TABLE(sys_semtimedop, sys_semtimedop, 312, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_add_watch
++TRACE_SYSCALL_TABLE(sys_inotify_add_watch, sys_inotify_add_watch, 317, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_openat
++TRACE_SYSCALL_TABLE(sys_openat, sys_openat, 322, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mkdirat
++TRACE_SYSCALL_TABLE(sys_mkdirat, sys_mkdirat, 323, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mknodat
++TRACE_SYSCALL_TABLE(sys_mknodat, sys_mknodat, 324, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchownat
++TRACE_SYSCALL_TABLE(sys_fchownat, sys_fchownat, 325, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_futimesat
++TRACE_SYSCALL_TABLE(sys_futimesat, sys_futimesat, 326, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstatat64
++TRACE_SYSCALL_TABLE(sys_fstatat64, sys_fstatat64, 327, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_unlinkat
++TRACE_SYSCALL_TABLE(sys_unlinkat, sys_unlinkat, 328, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_renameat
++TRACE_SYSCALL_TABLE(sys_renameat, sys_renameat, 329, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_linkat
++TRACE_SYSCALL_TABLE(sys_linkat, sys_linkat, 330, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_symlinkat
++TRACE_SYSCALL_TABLE(sys_symlinkat, sys_symlinkat, 331, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readlinkat
++TRACE_SYSCALL_TABLE(sys_readlinkat, sys_readlinkat, 332, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchmodat
++TRACE_SYSCALL_TABLE(sys_fchmodat, sys_fchmodat, 333, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_faccessat
++TRACE_SYSCALL_TABLE(sys_faccessat, sys_faccessat, 334, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pselect6
++TRACE_SYSCALL_TABLE(sys_pselect6, sys_pselect6, 335, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ppoll
++TRACE_SYSCALL_TABLE(sys_ppoll, sys_ppoll, 336, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_set_robust_list
++TRACE_SYSCALL_TABLE(sys_set_robust_list, sys_set_robust_list, 338, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_get_robust_list
++TRACE_SYSCALL_TABLE(sys_get_robust_list, sys_get_robust_list, 339, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_splice
++TRACE_SYSCALL_TABLE(sys_splice, sys_splice, 340, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_vmsplice
++TRACE_SYSCALL_TABLE(sys_vmsplice, sys_vmsplice, 343, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getcpu
++TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 345, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_pwait
++TRACE_SYSCALL_TABLE(sys_epoll_pwait, sys_epoll_pwait, 346, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_utimensat
++TRACE_SYSCALL_TABLE(sys_utimensat, sys_utimensat, 348, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_signalfd
++TRACE_SYSCALL_TABLE(sys_signalfd, sys_signalfd, 349, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timerfd_settime
++TRACE_SYSCALL_TABLE(sys_timerfd_settime, sys_timerfd_settime, 353, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timerfd_gettime
++TRACE_SYSCALL_TABLE(sys_timerfd_gettime, sys_timerfd_gettime, 354, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_signalfd4
++TRACE_SYSCALL_TABLE(sys_signalfd4, sys_signalfd4, 355, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pipe2
++TRACE_SYSCALL_TABLE(sys_pipe2, sys_pipe2, 359, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_preadv
++TRACE_SYSCALL_TABLE(sys_preadv, sys_preadv, 361, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pwritev
++TRACE_SYSCALL_TABLE(sys_pwritev, sys_pwritev, 362, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_tgsigqueueinfo
++TRACE_SYSCALL_TABLE(sys_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, 363, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_perf_event_open
++TRACE_SYSCALL_TABLE(sys_perf_event_open, sys_perf_event_open, 364, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_recvmmsg
++TRACE_SYSCALL_TABLE(sys_recvmmsg, sys_recvmmsg, 365, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_accept4
++TRACE_SYSCALL_TABLE(sys_accept4, sys_accept4, 366, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_prlimit64
++TRACE_SYSCALL_TABLE(sys_prlimit64, sys_prlimit64, 369, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_name_to_handle_at
++TRACE_SYSCALL_TABLE(sys_name_to_handle_at, sys_name_to_handle_at, 370, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_open_by_handle_at
++TRACE_SYSCALL_TABLE(sys_open_by_handle_at, sys_open_by_handle_at, 371, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_adjtime
++TRACE_SYSCALL_TABLE(sys_clock_adjtime, sys_clock_adjtime, 372, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendmmsg
++TRACE_SYSCALL_TABLE(sys_sendmmsg, sys_sendmmsg, 374, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_process_vm_readv
++TRACE_SYSCALL_TABLE(sys_process_vm_readv, sys_process_vm_readv, 376, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_process_vm_writev
++TRACE_SYSCALL_TABLE(sys_process_vm_writev, sys_process_vm_writev, 377, 6)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/arm-32-syscalls-3.4.25_pointers_override.h
+@@ -0,0 +1,39 @@
++
++#define OVERRIDE_TABLE_32_sys_mmap2
++
++
++#ifndef CREATE_SYSCALL_TABLE
++
++SC_TRACE_EVENT(sys_mmap2,
++ TP_PROTO(void *addr, size_t len, int prot,
++ int flags, int fd, off_t pgoff),
++ TP_ARGS(addr, len, prot, flags, fd, pgoff),
++ TP_STRUCT__entry(
++ __field_hex(void *, addr)
++ __field(size_t, len)
++ __field(int, prot)
++ __field(int, flags)
++ __field(int, fd)
++ __field(off_t, pgoff)),
++ TP_fast_assign(
++ tp_assign(addr, addr)
++ tp_assign(len, len)
++ tp_assign(prot, prot)
++ tp_assign(flags, flags)
++ tp_assign(fd, fd)
++ tp_assign(pgoff, pgoff)),
++ TP_printk()
++)
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#define OVERRIDE_TABLE_32_sys_execve
++TRACE_SYSCALL_TABLE(sys_execve, sys_execve, 11, 3)
++#define OVERRIDE_TABLE_32_sys_clone
++TRACE_SYSCALL_TABLE(sys_clone, sys_clone, 120, 5)
++#define OVERRIDE_TABLE_32_sys_mmap2
++TRACE_SYSCALL_TABLE(sys_mmap2, sys_mmap2, 192, 6)
++
++#endif /* CREATE_SYSCALL_TABLE */
++
++
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_integers.h
+@@ -0,0 +1,3 @@
++#ifdef CONFIG_X86_64
++#include "x86-32-syscalls-3.1.0-rc6_integers.h"
++#endif
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/compat_syscalls_pointers.h
+@@ -0,0 +1,3 @@
++#ifdef CONFIG_X86_64
++#include "x86-32-syscalls-3.1.0-rc6_pointers.h"
++#endif
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/mips-32-syscalls-3.5.0_integers.h
+@@ -0,0 +1,677 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_INTEGERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_INTEGERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "mips-32-syscalls-3.5.0_integers_override.h"
++#include "syscalls_integers_override.h"
++
++SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,
++ TP_STRUCT__entry(),
++ TP_fast_assign(),
++ TP_printk()
++)
++#ifndef OVERRIDE_32_sys_getpid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpid)
++#endif
++#ifndef OVERRIDE_32_sys_getuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid)
++#endif
++#ifndef OVERRIDE_32_sys_pause
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_pause)
++#endif
++#ifndef OVERRIDE_32_sys_sync
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sync)
++#endif
++#ifndef OVERRIDE_32_sys_getgid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid)
++#endif
++#ifndef OVERRIDE_32_sys_geteuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid)
++#endif
++#ifndef OVERRIDE_32_sys_getegid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid)
++#endif
++#ifndef OVERRIDE_32_sys_getppid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getppid)
++#endif
++#ifndef OVERRIDE_32_sys_getpgrp
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpgrp)
++#endif
++#ifndef OVERRIDE_32_sys_setsid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_setsid)
++#endif
++#ifndef OVERRIDE_32_sys_sgetmask
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sgetmask)
++#endif
++#ifndef OVERRIDE_32_sys_vhangup
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_vhangup)
++#endif
++#ifndef OVERRIDE_32_sys_munlockall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_munlockall)
++#endif
++#ifndef OVERRIDE_32_sys_sched_yield
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sched_yield)
++#endif
++#ifndef OVERRIDE_32_sys_exit
++SC_TRACE_EVENT(sys_exit,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_close
++SC_TRACE_EVENT(sys_close,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setuid
++SC_TRACE_EVENT(sys_setuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_alarm
++SC_TRACE_EVENT(sys_alarm,
++ TP_PROTO(unsigned int seconds),
++ TP_ARGS(seconds),
++ TP_STRUCT__entry(__field(unsigned int, seconds)),
++ TP_fast_assign(tp_assign(seconds, seconds)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_nice
++SC_TRACE_EVENT(sys_nice,
++ TP_PROTO(int increment),
++ TP_ARGS(increment),
++ TP_STRUCT__entry(__field(int, increment)),
++ TP_fast_assign(tp_assign(increment, increment)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup
++SC_TRACE_EVENT(sys_dup,
++ TP_PROTO(unsigned int fildes),
++ TP_ARGS(fildes),
++ TP_STRUCT__entry(__field(unsigned int, fildes)),
++ TP_fast_assign(tp_assign(fildes, fildes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_brk
++SC_TRACE_EVENT(sys_brk,
++ TP_PROTO(unsigned long brk),
++ TP_ARGS(brk),
++ TP_STRUCT__entry(__field(unsigned long, brk)),
++ TP_fast_assign(tp_assign(brk, brk)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgid
++SC_TRACE_EVENT(sys_setgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_umask
++SC_TRACE_EVENT(sys_umask,
++ TP_PROTO(int mask),
++ TP_ARGS(mask),
++ TP_STRUCT__entry(__field(int, mask)),
++ TP_fast_assign(tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ssetmask
++SC_TRACE_EVENT(sys_ssetmask,
++ TP_PROTO(int newmask),
++ TP_ARGS(newmask),
++ TP_STRUCT__entry(__field(int, newmask)),
++ TP_fast_assign(tp_assign(newmask, newmask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fsync
++SC_TRACE_EVENT(sys_fsync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpgid
++SC_TRACE_EVENT(sys_getpgid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchdir
++SC_TRACE_EVENT(sys_fchdir,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_personality
++SC_TRACE_EVENT(sys_personality,
++ TP_PROTO(unsigned int personality),
++ TP_ARGS(personality),
++ TP_STRUCT__entry(__field(unsigned int, personality)),
++ TP_fast_assign(tp_assign(personality, personality)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsuid
++SC_TRACE_EVENT(sys_setfsuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsgid
++SC_TRACE_EVENT(sys_setfsgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getsid
++SC_TRACE_EVENT(sys_getsid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fdatasync
++SC_TRACE_EVENT(sys_fdatasync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mlockall
++SC_TRACE_EVENT(sys_mlockall,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getscheduler
++SC_TRACE_EVENT(sys_sched_getscheduler,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_get_priority_max
++SC_TRACE_EVENT(sys_sched_get_priority_max,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_get_priority_min
++SC_TRACE_EVENT(sys_sched_get_priority_min,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_kill
++SC_TRACE_EVENT(sys_kill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setpgid
++SC_TRACE_EVENT(sys_setpgid,
++ TP_PROTO(pid_t pid, pid_t pgid),
++ TP_ARGS(pid, pgid),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, pgid)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(pgid, pgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup2
++SC_TRACE_EVENT(sys_dup2,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd),
++ TP_ARGS(oldfd, newfd),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setreuid
++SC_TRACE_EVENT(sys_setreuid,
++ TP_PROTO(uid_t ruid, uid_t euid),
++ TP_ARGS(ruid, euid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setregid
++SC_TRACE_EVENT(sys_setregid,
++ TP_PROTO(gid_t rgid, gid_t egid),
++ TP_ARGS(rgid, egid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_munmap
++SC_TRACE_EVENT(sys_munmap,
++ TP_PROTO(unsigned long addr, size_t len),
++ TP_ARGS(addr, len),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ftruncate
++SC_TRACE_EVENT(sys_ftruncate,
++ TP_PROTO(unsigned int fd, unsigned long length),
++ TP_ARGS(fd, length),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, length)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchmod
++SC_TRACE_EVENT(sys_fchmod,
++ TP_PROTO(unsigned int fd, umode_t mode),
++ TP_ARGS(fd, mode),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpriority
++SC_TRACE_EVENT(sys_getpriority,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_bdflush
++SC_TRACE_EVENT(sys_bdflush,
++ TP_PROTO(int func, long data),
++ TP_ARGS(func, data),
++ TP_STRUCT__entry(__field(int, func) __field(long, data)),
++ TP_fast_assign(tp_assign(func, func) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_flock
++SC_TRACE_EVENT(sys_flock,
++ TP_PROTO(unsigned int fd, unsigned int cmd),
++ TP_ARGS(fd, cmd),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mlock
++SC_TRACE_EVENT(sys_mlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_munlock
++SC_TRACE_EVENT(sys_munlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lseek
++SC_TRACE_EVENT(sys_lseek,
++ TP_PROTO(unsigned int fd, off_t offset, unsigned int origin),
++ TP_ARGS(fd, offset, origin),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(off_t, offset) __field(unsigned int, origin)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(origin, origin)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ioctl
++SC_TRACE_EVENT(sys_ioctl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fcntl
++SC_TRACE_EVENT(sys_fcntl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchown
++SC_TRACE_EVENT(sys_fchown,
++ TP_PROTO(unsigned int fd, uid_t user, gid_t group),
++ TP_ARGS(fd, user, group),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setpriority
++SC_TRACE_EVENT(sys_setpriority,
++ TP_PROTO(int which, int who, int niceval),
++ TP_ARGS(which, who, niceval),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, niceval)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(niceval, niceval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mprotect
++SC_TRACE_EVENT(sys_mprotect,
++ TP_PROTO(unsigned long start, size_t len, unsigned long prot),
++ TP_ARGS(start, len, prot),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(unsigned long, prot)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(prot, prot)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysfs
++SC_TRACE_EVENT(sys_sysfs,
++ TP_PROTO(int option, unsigned long arg1, unsigned long arg2),
++ TP_ARGS(option, arg1, arg2),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg1) __field(unsigned long, arg2)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg1, arg1) tp_assign(arg2, arg2)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_msync
++SC_TRACE_EVENT(sys_msync,
++ TP_PROTO(unsigned long start, size_t len, int flags),
++ TP_ARGS(start, len, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(int, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_cacheflush
++SC_TRACE_EVENT(sys_cacheflush,
++ TP_PROTO(unsigned long addr, unsigned long bytes, unsigned int cache),
++ TP_ARGS(addr, bytes, cache),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, bytes) __field(unsigned int, cache)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(bytes, bytes) tp_assign(cache, cache)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ptrace
++SC_TRACE_EVENT(sys_ptrace,
++ TP_PROTO(long request, long pid, unsigned long addr, unsigned long data),
++ TP_ARGS(request, pid, addr, data),
++ TP_STRUCT__entry(__field(long, request) __field(long, pid) __field_hex(unsigned long, addr) __field(unsigned long, data)),
++ TP_fast_assign(tp_assign(request, request) tp_assign(pid, pid) tp_assign(addr, addr) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mremap
++SC_TRACE_EVENT(sys_mremap,
++ TP_PROTO(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr),
++ TP_ARGS(addr, old_len, new_len, flags, new_addr),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, old_len) __field(unsigned long, new_len) __field(unsigned long, flags) __field_hex(unsigned long, new_addr)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(old_len, old_len) tp_assign(new_len, new_len) tp_assign(flags, flags) tp_assign(new_addr, new_addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mips_mmap
++SC_TRACE_EVENT(sys_mips_mmap,
++ TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, off_t offset),
++ TP_ARGS(addr, len, prot, flags, fd, offset),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, len) __field(unsigned long, prot) __field(unsigned long, flags) __field(unsigned long, fd) __field(off_t, offset)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(offset, offset)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_INTEGERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "mips-32-syscalls-3.5.0_integers_override.h"
++#include "syscalls_integers_override.h"
++
++#ifndef OVERRIDE_TABLE_32_sys_getpid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpid, 4041, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid, 4049, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pause
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_pause, 4059, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sync
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sync, 4073, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid, 4095, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_geteuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid, 4099, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getegid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid, 4101, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getppid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getppid, 4129, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpgrp
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpgrp, 4131, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setsid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_setsid, 4133, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sgetmask
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sgetmask, 4137, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_vhangup
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_vhangup, 4223, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munlockall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_munlockall, 4315, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_yield
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sched_yield, 4325, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_exit
++TRACE_SYSCALL_TABLE(sys_exit, sys_exit, 4003, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_close
++TRACE_SYSCALL_TABLE(sys_close, sys_close, 4013, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lseek
++TRACE_SYSCALL_TABLE(sys_lseek, sys_lseek, 4039, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setuid
++TRACE_SYSCALL_TABLE(sys_setuid, sys_setuid, 4047, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ptrace
++TRACE_SYSCALL_TABLE(sys_ptrace, sys_ptrace, 4053, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_alarm
++TRACE_SYSCALL_TABLE(sys_alarm, sys_alarm, 4055, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_nice
++TRACE_SYSCALL_TABLE(sys_nice, sys_nice, 4069, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_kill
++TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 4075, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup
++TRACE_SYSCALL_TABLE(sys_dup, sys_dup, 4083, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_brk
++TRACE_SYSCALL_TABLE(sys_brk, sys_brk, 4091, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgid
++TRACE_SYSCALL_TABLE(sys_setgid, sys_setgid, 4093, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ioctl
++TRACE_SYSCALL_TABLE(sys_ioctl, sys_ioctl, 4109, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fcntl
++TRACE_SYSCALL_TABLE(sys_fcntl, sys_fcntl, 4111, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setpgid
++TRACE_SYSCALL_TABLE(sys_setpgid, sys_setpgid, 4115, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_umask
++TRACE_SYSCALL_TABLE(sys_umask, sys_umask, 4121, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup2
++TRACE_SYSCALL_TABLE(sys_dup2, sys_dup2, 4127, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ssetmask
++TRACE_SYSCALL_TABLE(sys_ssetmask, sys_ssetmask, 4139, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setreuid
++TRACE_SYSCALL_TABLE(sys_setreuid, sys_setreuid, 4141, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setregid
++TRACE_SYSCALL_TABLE(sys_setregid, sys_setregid, 4143, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mips_mmap
++TRACE_SYSCALL_TABLE(sys_mips_mmap, sys_mips_mmap, 4181, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munmap
++TRACE_SYSCALL_TABLE(sys_munmap, sys_munmap, 4183, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ftruncate
++TRACE_SYSCALL_TABLE(sys_ftruncate, sys_ftruncate, 4187, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchmod
++TRACE_SYSCALL_TABLE(sys_fchmod, sys_fchmod, 4189, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchown
++TRACE_SYSCALL_TABLE(sys_fchown, sys_fchown, 4191, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpriority
++TRACE_SYSCALL_TABLE(sys_getpriority, sys_getpriority, 4193, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setpriority
++TRACE_SYSCALL_TABLE(sys_setpriority, sys_setpriority, 4195, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fsync
++TRACE_SYSCALL_TABLE(sys_fsync, sys_fsync, 4237, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mprotect
++TRACE_SYSCALL_TABLE(sys_mprotect, sys_mprotect, 4251, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpgid
++TRACE_SYSCALL_TABLE(sys_getpgid, sys_getpgid, 4265, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchdir
++TRACE_SYSCALL_TABLE(sys_fchdir, sys_fchdir, 4267, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_bdflush
++TRACE_SYSCALL_TABLE(sys_bdflush, sys_bdflush, 4269, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysfs
++TRACE_SYSCALL_TABLE(sys_sysfs, sys_sysfs, 4271, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_personality
++TRACE_SYSCALL_TABLE(sys_personality, sys_personality, 4273, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsuid
++TRACE_SYSCALL_TABLE(sys_setfsuid, sys_setfsuid, 4277, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsgid
++TRACE_SYSCALL_TABLE(sys_setfsgid, sys_setfsgid, 4279, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_flock
++TRACE_SYSCALL_TABLE(sys_flock, sys_flock, 4287, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_msync
++TRACE_SYSCALL_TABLE(sys_msync, sys_msync, 4289, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_cacheflush
++TRACE_SYSCALL_TABLE(sys_cacheflush, sys_cacheflush, 4295, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getsid
++TRACE_SYSCALL_TABLE(sys_getsid, sys_getsid, 4303, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fdatasync
++TRACE_SYSCALL_TABLE(sys_fdatasync, sys_fdatasync, 4305, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mlock
++TRACE_SYSCALL_TABLE(sys_mlock, sys_mlock, 4309, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munlock
++TRACE_SYSCALL_TABLE(sys_munlock, sys_munlock, 4311, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mlockall
++TRACE_SYSCALL_TABLE(sys_mlockall, sys_mlockall, 4313, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getscheduler
++TRACE_SYSCALL_TABLE(sys_sched_getscheduler, sys_sched_getscheduler, 4323, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_max
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_max, sys_sched_get_priority_max, 4327, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_min
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_min, sys_sched_get_priority_min, 4329, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mremap
++TRACE_SYSCALL_TABLE(sys_mremap, sys_mremap, 4335, 5)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/mips-32-syscalls-3.5.0_integers_override.h
+@@ -0,0 +1,3 @@
++/*
++ * This is a place-holder for MIPS integer syscall definition override.
++ */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/mips-32-syscalls-3.5.0_pointers.h
+@@ -0,0 +1,984 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_POINTERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_POINTERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "mips-32-syscalls-3.5.0_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_32_sys_unlink
++SC_TRACE_EVENT(sys_unlink,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chdir
++SC_TRACE_EVENT(sys_chdir,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_time
++SC_TRACE_EVENT(sys_time,
++ TP_PROTO(time_t * tloc),
++ TP_ARGS(tloc),
++ TP_STRUCT__entry(__field_hex(time_t *, tloc)),
++ TP_fast_assign(tp_assign(tloc, tloc)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_oldumount
++SC_TRACE_EVENT(sys_oldumount,
++ TP_PROTO(char * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_stime
++SC_TRACE_EVENT(sys_stime,
++ TP_PROTO(time_t * tptr),
++ TP_ARGS(tptr),
++ TP_STRUCT__entry(__field_hex(time_t *, tptr)),
++ TP_fast_assign(tp_assign(tptr, tptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rmdir
++SC_TRACE_EVENT(sys_rmdir,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_times
++SC_TRACE_EVENT(sys_times,
++ TP_PROTO(struct tms * tbuf),
++ TP_ARGS(tbuf),
++ TP_STRUCT__entry(__field_hex(struct tms *, tbuf)),
++ TP_fast_assign(tp_assign(tbuf, tbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_olduname
++SC_TRACE_EVENT(sys_olduname,
++ TP_PROTO(struct oldold_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct oldold_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chroot
++SC_TRACE_EVENT(sys_chroot,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sigpending
++SC_TRACE_EVENT(sys_sigpending,
++ TP_PROTO(old_sigset_t * set),
++ TP_ARGS(set),
++ TP_STRUCT__entry(__field_hex(old_sigset_t *, set)),
++ TP_fast_assign(tp_assign(set, set)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_uselib
++SC_TRACE_EVENT(sys_uselib,
++ TP_PROTO(const char * library),
++ TP_ARGS(library),
++ TP_STRUCT__entry(__field_hex(const char *, library)),
++ TP_fast_assign(tp_assign(library, library)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_uname
++SC_TRACE_EVENT(sys_uname,
++ TP_PROTO(struct old_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct old_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_swapoff
++SC_TRACE_EVENT(sys_swapoff,
++ TP_PROTO(const char * specialfile),
++ TP_ARGS(specialfile),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysinfo
++SC_TRACE_EVENT(sys_sysinfo,
++ TP_PROTO(struct sysinfo * info),
++ TP_ARGS(info),
++ TP_STRUCT__entry(__field_hex(struct sysinfo *, info)),
++ TP_fast_assign(tp_assign(info, info)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newuname
++SC_TRACE_EVENT(sys_newuname,
++ TP_PROTO(struct new_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct new_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_adjtimex
++SC_TRACE_EVENT(sys_adjtimex,
++ TP_PROTO(struct timex * txc_p),
++ TP_ARGS(txc_p),
++ TP_STRUCT__entry(__field_hex(struct timex *, txc_p)),
++ TP_fast_assign(tp_assign(txc_p, txc_p)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysctl
++SC_TRACE_EVENT(sys_sysctl,
++ TP_PROTO(struct __sysctl_args * args),
++ TP_ARGS(args),
++ TP_STRUCT__entry(__field_hex(struct __sysctl_args *, args)),
++ TP_fast_assign(tp_assign(args, args)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_creat
++SC_TRACE_EVENT(sys_creat,
++ TP_PROTO(const char * pathname, umode_t mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_link
++SC_TRACE_EVENT(sys_link,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chmod
++SC_TRACE_EVENT(sys_chmod,
++ TP_PROTO(const char * filename, umode_t mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_utime
++SC_TRACE_EVENT(sys_utime,
++ TP_PROTO(char * filename, struct utimbuf * times),
++ TP_ARGS(filename, times),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct utimbuf *, times)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(times, times)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_access
++SC_TRACE_EVENT(sys_access,
++ TP_PROTO(const char * filename, int mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rename
++SC_TRACE_EVENT(sys_rename,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mkdir
++SC_TRACE_EVENT(sys_mkdir,
++ TP_PROTO(const char * pathname, umode_t mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_umount
++SC_TRACE_EVENT(sys_umount,
++ TP_PROTO(char * name, int flags),
++ TP_ARGS(name, flags),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ustat
++SC_TRACE_EVENT(sys_ustat,
++ TP_PROTO(unsigned dev, struct ustat * ubuf),
++ TP_ARGS(dev, ubuf),
++ TP_STRUCT__entry(__field(unsigned, dev) __field_hex(struct ustat *, ubuf)),
++ TP_fast_assign(tp_assign(dev, dev) tp_assign(ubuf, ubuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sethostname
++SC_TRACE_EVENT(sys_sethostname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setrlimit
++SC_TRACE_EVENT(sys_setrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getrlimit
++SC_TRACE_EVENT(sys_getrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getrusage
++SC_TRACE_EVENT(sys_getrusage,
++ TP_PROTO(int who, struct rusage * ru),
++ TP_ARGS(who, ru),
++ TP_STRUCT__entry(__field(int, who) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(who, who) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_gettimeofday
++SC_TRACE_EVENT(sys_gettimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_settimeofday
++SC_TRACE_EVENT(sys_settimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getgroups
++SC_TRACE_EVENT(sys_getgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgroups
++SC_TRACE_EVENT(sys_setgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_symlink
++SC_TRACE_EVENT(sys_symlink,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_swapon
++SC_TRACE_EVENT(sys_swapon,
++ TP_PROTO(const char * specialfile, int swap_flags),
++ TP_ARGS(specialfile, swap_flags),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile) __field(int, swap_flags)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile) tp_assign(swap_flags, swap_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_truncate
++SC_TRACE_EVENT(sys_truncate,
++ TP_PROTO(const char * path, long length),
++ TP_ARGS(path, length),
++ TP_STRUCT__entry(__string_from_user(path, path) __field(long, length)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_statfs
++SC_TRACE_EVENT(sys_statfs,
++ TP_PROTO(const char * pathname, struct statfs * buf),
++ TP_ARGS(pathname, buf),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstatfs
++SC_TRACE_EVENT(sys_fstatfs,
++ TP_PROTO(unsigned int fd, struct statfs * buf),
++ TP_ARGS(fd, buf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_socketcall
++SC_TRACE_EVENT(sys_socketcall,
++ TP_PROTO(int call, unsigned long * args),
++ TP_ARGS(call, args),
++ TP_STRUCT__entry(__field(int, call) __field_hex(unsigned long *, args)),
++ TP_fast_assign(tp_assign(call, call) tp_assign(args, args)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getitimer
++SC_TRACE_EVENT(sys_getitimer,
++ TP_PROTO(int which, struct itimerval * value),
++ TP_ARGS(which, value),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newstat
++SC_TRACE_EVENT(sys_newstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newlstat
++SC_TRACE_EVENT(sys_newlstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newfstat
++SC_TRACE_EVENT(sys_newfstat,
++ TP_PROTO(unsigned int fd, struct stat * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setdomainname
++SC_TRACE_EVENT(sys_setdomainname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_delete_module
++SC_TRACE_EVENT(sys_delete_module,
++ TP_PROTO(const char * name_user, unsigned int flags),
++ TP_ARGS(name_user, flags),
++ TP_STRUCT__entry(__string_from_user(name_user, name_user) __field(unsigned int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name_user, name_user) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setparam
++SC_TRACE_EVENT(sys_sched_setparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getparam
++SC_TRACE_EVENT(sys_sched_getparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_rr_get_interval
++SC_TRACE_EVENT(sys_sched_rr_get_interval,
++ TP_PROTO(pid_t pid, struct timespec * interval),
++ TP_ARGS(pid, interval),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct timespec *, interval)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_nanosleep
++SC_TRACE_EVENT(sys_nanosleep,
++ TP_PROTO(struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(rqtp, rmtp),
++ TP_STRUCT__entry(__field_hex(struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_read
++SC_TRACE_EVENT(sys_read,
++ TP_PROTO(unsigned int fd, char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_write
++SC_TRACE_EVENT(sys_write,
++ TP_PROTO(unsigned int fd, const char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_open
++SC_TRACE_EVENT(sys_open,
++ TP_PROTO(const char * filename, int flags, umode_t mode),
++ TP_ARGS(filename, flags, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, flags) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_waitpid
++SC_TRACE_EVENT(sys_waitpid,
++ TP_PROTO(pid_t pid, int * stat_addr, int options),
++ TP_ARGS(pid, stat_addr, options),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(int *, stat_addr) __field(int, options)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(stat_addr, stat_addr) tp_assign(options, options)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mknod
++SC_TRACE_EVENT(sys_mknod,
++ TP_PROTO(const char * filename, umode_t mode, unsigned dev),
++ TP_ARGS(filename, mode, dev),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(umode_t, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lchown
++SC_TRACE_EVENT(sys_lchown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sigaction
++SC_TRACE_EVENT(sys_sigaction,
++ TP_PROTO(int sig, const struct sigaction * act, struct sigaction * oact),
++ TP_ARGS(sig, act, oact),
++ TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction *, act) __field_hex(struct sigaction *, oact)),
++ TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readlink
++SC_TRACE_EVENT(sys_readlink,
++ TP_PROTO(const char * path, char * buf, int bufsiz),
++ TP_ARGS(path, buf, bufsiz),
++ TP_STRUCT__entry(__string_from_user(path, path) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_old_readdir
++SC_TRACE_EVENT(sys_old_readdir,
++ TP_PROTO(unsigned int fd, struct old_linux_dirent * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct old_linux_dirent *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_syslog
++SC_TRACE_EVENT(sys_syslog,
++ TP_PROTO(int type, char * buf, int len),
++ TP_ARGS(type, buf, len),
++ TP_STRUCT__entry(__field(int, type) __field_hex(char *, buf) __field(int, len)),
++ TP_fast_assign(tp_assign(type, type) tp_assign(buf, buf) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setitimer
++SC_TRACE_EVENT(sys_setitimer,
++ TP_PROTO(int which, struct itimerval * value, struct itimerval * ovalue),
++ TP_ARGS(which, value, ovalue),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value) __field_hex(struct itimerval *, ovalue)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value) tp_assign(ovalue, ovalue)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sigprocmask
++SC_TRACE_EVENT(sys_sigprocmask,
++ TP_PROTO(int how, old_sigset_t * nset, old_sigset_t * oset),
++ TP_ARGS(how, nset, oset),
++ TP_STRUCT__entry(__field(int, how) __field_hex(old_sigset_t *, nset) __field_hex(old_sigset_t *, oset)),
++ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_init_module
++SC_TRACE_EVENT(sys_init_module,
++ TP_PROTO(void * umod, unsigned long len, const char * uargs),
++ TP_ARGS(umod, len, uargs),
++ TP_STRUCT__entry(__field_hex(void *, umod) __field(unsigned long, len) __field_hex(const char *, uargs)),
++ TP_fast_assign(tp_assign(umod, umod) tp_assign(len, len) tp_assign(uargs, uargs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getdents
++SC_TRACE_EVENT(sys_getdents,
++ TP_PROTO(unsigned int fd, struct linux_dirent * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readv
++SC_TRACE_EVENT(sys_readv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_writev
++SC_TRACE_EVENT(sys_writev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_cachectl
++SC_TRACE_EVENT(sys_cachectl,
++ TP_PROTO(char * addr, int nbytes, int op),
++ TP_ARGS(addr, nbytes, op),
++ TP_STRUCT__entry(__field_hex(char *, addr) __field(int, nbytes) __field(int, op)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(nbytes, nbytes) tp_assign(op, op)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setscheduler
++SC_TRACE_EVENT(sys_sched_setscheduler,
++ TP_PROTO(pid_t pid, int policy, struct sched_param * param),
++ TP_ARGS(pid, policy, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, policy) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(policy, policy) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_accept
++SC_TRACE_EVENT(sys_accept,
++ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen),
++ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_bind
++SC_TRACE_EVENT(sys_bind,
++ TP_PROTO(int fd, struct sockaddr * umyaddr, int addrlen),
++ TP_ARGS(fd, umyaddr, addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, umyaddr) __field_hex(int, addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(umyaddr, umyaddr) tp_assign(addrlen, addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_connect
++SC_TRACE_EVENT(sys_connect,
++ TP_PROTO(int fd, struct sockaddr * uservaddr, int addrlen),
++ TP_ARGS(fd, uservaddr, addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, uservaddr) __field_hex(int, addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(uservaddr, uservaddr) tp_assign(addrlen, addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpeername
++SC_TRACE_EVENT(sys_getpeername,
++ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
++ TP_ARGS(fd, usockaddr, usockaddr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getsockname
++SC_TRACE_EVENT(sys_getsockname,
++ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
++ TP_ARGS(fd, usockaddr, usockaddr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_reboot
++SC_TRACE_EVENT(sys_reboot,
++ TP_PROTO(int magic1, int magic2, unsigned int cmd, void * arg),
++ TP_ARGS(magic1, magic2, cmd, arg),
++ TP_STRUCT__entry(__field(int, magic1) __field(int, magic2) __field(unsigned int, cmd) __field_hex(void *, arg)),
++ TP_fast_assign(tp_assign(magic1, magic1) tp_assign(magic2, magic2) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_wait4
++SC_TRACE_EVENT(sys_wait4,
++ TP_PROTO(pid_t upid, int * stat_addr, int options, struct rusage * ru),
++ TP_ARGS(upid, stat_addr, options, ru),
++ TP_STRUCT__entry(__field(pid_t, upid) __field_hex(int *, stat_addr) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(upid, upid) tp_assign(stat_addr, stat_addr) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_quotactl
++SC_TRACE_EVENT(sys_quotactl,
++ TP_PROTO(unsigned int cmd, const char * special, qid_t id, void * addr),
++ TP_ARGS(cmd, special, id, addr),
++ TP_STRUCT__entry(__field(unsigned int, cmd) __field_hex(const char *, special) __field(qid_t, id) __field_hex(void *, addr)),
++ TP_fast_assign(tp_assign(cmd, cmd) tp_assign(special, special) tp_assign(id, id) tp_assign(addr, addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mount
++SC_TRACE_EVENT(sys_mount,
++ TP_PROTO(char * dev_name, char * dir_name, char * type, unsigned long flags, void * data),
++ TP_ARGS(dev_name, dir_name, type, flags, data),
++ TP_STRUCT__entry(__string_from_user(dev_name, dev_name) __string_from_user(dir_name, dir_name) __string_from_user(type, type) __field(unsigned long, flags) __field_hex(void *, data)),
++ TP_fast_assign(tp_copy_string_from_user(dev_name, dev_name) tp_copy_string_from_user(dir_name, dir_name) tp_copy_string_from_user(type, type) tp_assign(flags, flags) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_llseek
++SC_TRACE_EVENT(sys_llseek,
++ TP_PROTO(unsigned int fd, unsigned long offset_high, unsigned long offset_low, loff_t * result, unsigned int origin),
++ TP_ARGS(fd, offset_high, offset_low, result, origin),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, offset_high) __field(unsigned long, offset_low) __field_hex(loff_t *, result) __field(unsigned int, origin)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset_high, offset_high) tp_assign(offset_low, offset_low) tp_assign(result, result) tp_assign(origin, origin)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_select
++SC_TRACE_EVENT(sys_select,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timeval * tvp),
++ TP_ARGS(n, inp, outp, exp, tvp),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timeval *, tvp)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tvp, tvp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ipc
++SC_TRACE_EVENT(sys_ipc,
++ TP_PROTO(unsigned int call, int first, unsigned long second, unsigned long third, void * ptr, long fifth),
++ TP_ARGS(call, first, second, third, ptr, fifth),
++ TP_STRUCT__entry(__field(unsigned int, call) __field(int, first) __field(unsigned long, second) __field(unsigned long, third) __field_hex(void *, ptr) __field(long, fifth)),
++ TP_fast_assign(tp_assign(call, call) tp_assign(first, first) tp_assign(second, second) tp_assign(third, third) tp_assign(ptr, ptr) tp_assign(fifth, fifth)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_POINTERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "mips-32-syscalls-3.5.0_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_TABLE_32_sys_read
++TRACE_SYSCALL_TABLE(sys_read, sys_read, 4007, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_write
++TRACE_SYSCALL_TABLE(sys_write, sys_write, 4009, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_open
++TRACE_SYSCALL_TABLE(sys_open, sys_open, 4011, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_waitpid
++TRACE_SYSCALL_TABLE(sys_waitpid, sys_waitpid, 4015, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_creat
++TRACE_SYSCALL_TABLE(sys_creat, sys_creat, 4017, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_link
++TRACE_SYSCALL_TABLE(sys_link, sys_link, 4019, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_unlink
++TRACE_SYSCALL_TABLE(sys_unlink, sys_unlink, 4021, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chdir
++TRACE_SYSCALL_TABLE(sys_chdir, sys_chdir, 4025, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_time
++TRACE_SYSCALL_TABLE(sys_time, sys_time, 4027, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mknod
++TRACE_SYSCALL_TABLE(sys_mknod, sys_mknod, 4029, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chmod
++TRACE_SYSCALL_TABLE(sys_chmod, sys_chmod, 4031, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lchown
++TRACE_SYSCALL_TABLE(sys_lchown, sys_lchown, 4033, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mount
++TRACE_SYSCALL_TABLE(sys_mount, sys_mount, 4043, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_oldumount
++TRACE_SYSCALL_TABLE(sys_oldumount, sys_oldumount, 4045, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_stime
++TRACE_SYSCALL_TABLE(sys_stime, sys_stime, 4051, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_utime
++TRACE_SYSCALL_TABLE(sys_utime, sys_utime, 4061, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_access
++TRACE_SYSCALL_TABLE(sys_access, sys_access, 4067, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rename
++TRACE_SYSCALL_TABLE(sys_rename, sys_rename, 4077, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mkdir
++TRACE_SYSCALL_TABLE(sys_mkdir, sys_mkdir, 4079, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rmdir
++TRACE_SYSCALL_TABLE(sys_rmdir, sys_rmdir, 4081, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_times
++TRACE_SYSCALL_TABLE(sys_times, sys_times, 4087, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_umount
++TRACE_SYSCALL_TABLE(sys_umount, sys_umount, 4105, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_olduname
++TRACE_SYSCALL_TABLE(sys_olduname, sys_olduname, 4119, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chroot
++TRACE_SYSCALL_TABLE(sys_chroot, sys_chroot, 4123, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ustat
++TRACE_SYSCALL_TABLE(sys_ustat, sys_ustat, 4125, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sigaction
++TRACE_SYSCALL_TABLE(sys_sigaction, sys_sigaction, 4135, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sigpending
++TRACE_SYSCALL_TABLE(sys_sigpending, sys_sigpending, 4147, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sethostname
++TRACE_SYSCALL_TABLE(sys_sethostname, sys_sethostname, 4149, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setrlimit
++TRACE_SYSCALL_TABLE(sys_setrlimit, sys_setrlimit, 4151, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getrlimit
++TRACE_SYSCALL_TABLE(sys_getrlimit, sys_getrlimit, 4153, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getrusage
++TRACE_SYSCALL_TABLE(sys_getrusage, sys_getrusage, 4155, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_gettimeofday
++TRACE_SYSCALL_TABLE(sys_gettimeofday, sys_gettimeofday, 4157, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_settimeofday
++TRACE_SYSCALL_TABLE(sys_settimeofday, sys_settimeofday, 4159, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgroups
++TRACE_SYSCALL_TABLE(sys_getgroups, sys_getgroups, 4161, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgroups
++TRACE_SYSCALL_TABLE(sys_setgroups, sys_setgroups, 4163, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_symlink
++TRACE_SYSCALL_TABLE(sys_symlink, sys_symlink, 4167, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readlink
++TRACE_SYSCALL_TABLE(sys_readlink, sys_readlink, 4171, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_uselib
++TRACE_SYSCALL_TABLE(sys_uselib, sys_uselib, 4173, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_swapon
++TRACE_SYSCALL_TABLE(sys_swapon, sys_swapon, 4175, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_reboot
++TRACE_SYSCALL_TABLE(sys_reboot, sys_reboot, 4177, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_old_readdir
++TRACE_SYSCALL_TABLE(sys_old_readdir, sys_old_readdir, 4179, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_truncate
++TRACE_SYSCALL_TABLE(sys_truncate, sys_truncate, 4185, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_statfs
++TRACE_SYSCALL_TABLE(sys_statfs, sys_statfs, 4199, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstatfs
++TRACE_SYSCALL_TABLE(sys_fstatfs, sys_fstatfs, 4201, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_socketcall
++TRACE_SYSCALL_TABLE(sys_socketcall, sys_socketcall, 4205, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_syslog
++TRACE_SYSCALL_TABLE(sys_syslog, sys_syslog, 4207, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setitimer
++TRACE_SYSCALL_TABLE(sys_setitimer, sys_setitimer, 4209, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getitimer
++TRACE_SYSCALL_TABLE(sys_getitimer, sys_getitimer, 4211, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newstat
++TRACE_SYSCALL_TABLE(sys_newstat, sys_newstat, 4213, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newlstat
++TRACE_SYSCALL_TABLE(sys_newlstat, sys_newlstat, 4215, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newfstat
++TRACE_SYSCALL_TABLE(sys_newfstat, sys_newfstat, 4217, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_uname
++TRACE_SYSCALL_TABLE(sys_uname, sys_uname, 4219, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_wait4
++TRACE_SYSCALL_TABLE(sys_wait4, sys_wait4, 4229, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_swapoff
++TRACE_SYSCALL_TABLE(sys_swapoff, sys_swapoff, 4231, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysinfo
++TRACE_SYSCALL_TABLE(sys_sysinfo, sys_sysinfo, 4233, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ipc
++TRACE_SYSCALL_TABLE(sys_ipc, sys_ipc, 4235, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setdomainname
++TRACE_SYSCALL_TABLE(sys_setdomainname, sys_setdomainname, 4243, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newuname
++TRACE_SYSCALL_TABLE(sys_newuname, sys_newuname, 4245, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_adjtimex
++TRACE_SYSCALL_TABLE(sys_adjtimex, sys_adjtimex, 4249, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sigprocmask
++TRACE_SYSCALL_TABLE(sys_sigprocmask, sys_sigprocmask, 4253, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_init_module
++TRACE_SYSCALL_TABLE(sys_init_module, sys_init_module, 4257, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_delete_module
++TRACE_SYSCALL_TABLE(sys_delete_module, sys_delete_module, 4259, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_quotactl
++TRACE_SYSCALL_TABLE(sys_quotactl, sys_quotactl, 4263, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_llseek
++TRACE_SYSCALL_TABLE(sys_llseek, sys_llseek, 4281, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getdents
++TRACE_SYSCALL_TABLE(sys_getdents, sys_getdents, 4283, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_select
++TRACE_SYSCALL_TABLE(sys_select, sys_select, 4285, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readv
++TRACE_SYSCALL_TABLE(sys_readv, sys_readv, 4291, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_writev
++TRACE_SYSCALL_TABLE(sys_writev, sys_writev, 4293, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_cachectl
++TRACE_SYSCALL_TABLE(sys_cachectl, sys_cachectl, 4297, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysctl
++TRACE_SYSCALL_TABLE(sys_sysctl, sys_sysctl, 4307, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setparam
++TRACE_SYSCALL_TABLE(sys_sched_setparam, sys_sched_setparam, 4317, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getparam
++TRACE_SYSCALL_TABLE(sys_sched_getparam, sys_sched_getparam, 4319, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setscheduler
++TRACE_SYSCALL_TABLE(sys_sched_setscheduler, sys_sched_setscheduler, 4321, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_rr_get_interval
++TRACE_SYSCALL_TABLE(sys_sched_rr_get_interval, sys_sched_rr_get_interval, 4331, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_nanosleep
++TRACE_SYSCALL_TABLE(sys_nanosleep, sys_nanosleep, 4333, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_accept
++TRACE_SYSCALL_TABLE(sys_accept, sys_accept, 4337, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_bind
++TRACE_SYSCALL_TABLE(sys_bind, sys_bind, 4339, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_connect
++TRACE_SYSCALL_TABLE(sys_connect, sys_connect, 4341, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpeername
++TRACE_SYSCALL_TABLE(sys_getpeername, sys_getpeername, 4343, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getsockname
++TRACE_SYSCALL_TABLE(sys_getsockname, sys_getsockname, 4345, 3)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/mips-32-syscalls-3.5.0_pointers_override.h
+@@ -0,0 +1,8 @@
++#ifndef CREATE_SYSCALL_TABLE
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#define OVERRIDE_TABLE_32_sys_clone
++TRACE_SYSCALL_TABLE(sys_clone, sys_clone, 4120, 0)
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/mips-64-syscalls-3.5.0_integers.h
+@@ -0,0 +1,1163 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_INTEGERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_INTEGERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "mips-64-syscalls-3.5.0_integers_override.h"
++#include "syscalls_integers_override.h"
++
++SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,
++ TP_STRUCT__entry(),
++ TP_fast_assign(),
++ TP_printk()
++)
++#ifndef OVERRIDE_64_sys_sgetmask
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sgetmask)
++#endif
++#ifndef OVERRIDE_64_sys_sched_yield
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sched_yield)
++#endif
++#ifndef OVERRIDE_64_sys_pause
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_pause)
++#endif
++#ifndef OVERRIDE_64_sys_getpid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpid)
++#endif
++#ifndef OVERRIDE_64_sys_getuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid)
++#endif
++#ifndef OVERRIDE_64_sys_getgid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid)
++#endif
++#ifndef OVERRIDE_64_sys_geteuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid)
++#endif
++#ifndef OVERRIDE_64_sys_getegid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid)
++#endif
++#ifndef OVERRIDE_64_sys_getppid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getppid)
++#endif
++#ifndef OVERRIDE_64_sys_getpgrp
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpgrp)
++#endif
++#ifndef OVERRIDE_64_sys_setsid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_setsid)
++#endif
++#ifndef OVERRIDE_64_sys_munlockall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_munlockall)
++#endif
++#ifndef OVERRIDE_64_sys_vhangup
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_vhangup)
++#endif
++#ifndef OVERRIDE_64_sys_sync
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sync)
++#endif
++#ifndef OVERRIDE_64_sys_gettid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_gettid)
++#endif
++#ifndef OVERRIDE_64_sys_restart_syscall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_restart_syscall)
++#endif
++#ifndef OVERRIDE_64_sys_inotify_init
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_inotify_init)
++#endif
++#ifndef OVERRIDE_64_sys_nice
++SC_TRACE_EVENT(sys_nice,
++ TP_PROTO(int increment),
++ TP_ARGS(increment),
++ TP_STRUCT__entry(__field(int, increment)),
++ TP_fast_assign(tp_assign(increment, increment)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ssetmask
++SC_TRACE_EVENT(sys_ssetmask,
++ TP_PROTO(int newmask),
++ TP_ARGS(newmask),
++ TP_STRUCT__entry(__field(int, newmask)),
++ TP_fast_assign(tp_assign(newmask, newmask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_personality
++SC_TRACE_EVENT(sys_personality,
++ TP_PROTO(unsigned int personality),
++ TP_ARGS(personality),
++ TP_STRUCT__entry(__field(unsigned int, personality)),
++ TP_fast_assign(tp_assign(personality, personality)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_close
++SC_TRACE_EVENT(sys_close,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_brk
++SC_TRACE_EVENT(sys_brk,
++ TP_PROTO(unsigned long brk),
++ TP_ARGS(brk),
++ TP_STRUCT__entry(__field(unsigned long, brk)),
++ TP_fast_assign(tp_assign(brk, brk)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_dup
++SC_TRACE_EVENT(sys_dup,
++ TP_PROTO(unsigned int fildes),
++ TP_ARGS(fildes),
++ TP_STRUCT__entry(__field(unsigned int, fildes)),
++ TP_fast_assign(tp_assign(fildes, fildes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_alarm
++SC_TRACE_EVENT(sys_alarm,
++ TP_PROTO(unsigned int seconds),
++ TP_ARGS(seconds),
++ TP_STRUCT__entry(__field(unsigned int, seconds)),
++ TP_fast_assign(tp_assign(seconds, seconds)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_exit
++SC_TRACE_EVENT(sys_exit,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fsync
++SC_TRACE_EVENT(sys_fsync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fdatasync
++SC_TRACE_EVENT(sys_fdatasync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fchdir
++SC_TRACE_EVENT(sys_fchdir,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_umask
++SC_TRACE_EVENT(sys_umask,
++ TP_PROTO(int mask),
++ TP_ARGS(mask),
++ TP_STRUCT__entry(__field(int, mask)),
++ TP_fast_assign(tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setuid
++SC_TRACE_EVENT(sys_setuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setgid
++SC_TRACE_EVENT(sys_setgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getpgid
++SC_TRACE_EVENT(sys_getpgid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setfsuid
++SC_TRACE_EVENT(sys_setfsuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setfsgid
++SC_TRACE_EVENT(sys_setfsgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getsid
++SC_TRACE_EVENT(sys_getsid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_personality
++SC_TRACE_EVENT(sys_32_personality,
++ TP_PROTO(unsigned long personality),
++ TP_ARGS(personality),
++ TP_STRUCT__entry(__field(unsigned long, personality)),
++ TP_fast_assign(tp_assign(personality, personality)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_getscheduler
++SC_TRACE_EVENT(sys_sched_getscheduler,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_get_priority_max
++SC_TRACE_EVENT(sys_sched_get_priority_max,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_get_priority_min
++SC_TRACE_EVENT(sys_sched_get_priority_min,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mlockall
++SC_TRACE_EVENT(sys_mlockall,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_io_destroy
++SC_TRACE_EVENT(sys_io_destroy,
++ TP_PROTO(aio_context_t ctx),
++ TP_ARGS(ctx),
++ TP_STRUCT__entry(__field(aio_context_t, ctx)),
++ TP_fast_assign(tp_assign(ctx, ctx)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_exit_group
++SC_TRACE_EVENT(sys_exit_group,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_epoll_create
++SC_TRACE_EVENT(sys_epoll_create,
++ TP_PROTO(int size),
++ TP_ARGS(size),
++ TP_STRUCT__entry(__field(int, size)),
++ TP_fast_assign(tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timer_getoverrun
++SC_TRACE_EVENT(sys_timer_getoverrun,
++ TP_PROTO(timer_t timer_id),
++ TP_ARGS(timer_id),
++ TP_STRUCT__entry(__field(timer_t, timer_id)),
++ TP_fast_assign(tp_assign(timer_id, timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timer_delete
++SC_TRACE_EVENT(sys_timer_delete,
++ TP_PROTO(timer_t timer_id),
++ TP_ARGS(timer_id),
++ TP_STRUCT__entry(__field(timer_t, timer_id)),
++ TP_fast_assign(tp_assign(timer_id, timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_set_thread_area
++SC_TRACE_EVENT(sys_set_thread_area,
++ TP_PROTO(unsigned long addr),
++ TP_ARGS(addr),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr)),
++ TP_fast_assign(tp_assign(addr, addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_unshare
++SC_TRACE_EVENT(sys_unshare,
++ TP_PROTO(unsigned long unshare_flags),
++ TP_ARGS(unshare_flags),
++ TP_STRUCT__entry(__field(unsigned long, unshare_flags)),
++ TP_fast_assign(tp_assign(unshare_flags, unshare_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_eventfd
++SC_TRACE_EVENT(sys_eventfd,
++ TP_PROTO(unsigned int count),
++ TP_ARGS(count),
++ TP_STRUCT__entry(__field(unsigned int, count)),
++ TP_fast_assign(tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_epoll_create1
++SC_TRACE_EVENT(sys_epoll_create1,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_inotify_init1
++SC_TRACE_EVENT(sys_inotify_init1,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_syncfs
++SC_TRACE_EVENT(sys_syncfs,
++ TP_PROTO(int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_bdflush
++SC_TRACE_EVENT(sys_bdflush,
++ TP_PROTO(int func, long data),
++ TP_ARGS(func, data),
++ TP_STRUCT__entry(__field(int, func) __field(long, data)),
++ TP_fast_assign(tp_assign(func, func) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_munmap
++SC_TRACE_EVENT(sys_munmap,
++ TP_PROTO(unsigned long addr, size_t len),
++ TP_ARGS(addr, len),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_dup2
++SC_TRACE_EVENT(sys_dup2,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd),
++ TP_ARGS(oldfd, newfd),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_shutdown
++SC_TRACE_EVENT(sys_shutdown,
++ TP_PROTO(int fd, int how),
++ TP_ARGS(fd, how),
++ TP_STRUCT__entry(__field(int, fd) __field(int, how)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(how, how)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_listen
++SC_TRACE_EVENT(sys_listen,
++ TP_PROTO(int fd, int backlog),
++ TP_ARGS(fd, backlog),
++ TP_STRUCT__entry(__field(int, fd) __field(int, backlog)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(backlog, backlog)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_kill
++SC_TRACE_EVENT(sys_kill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_msgget
++SC_TRACE_EVENT(sys_msgget,
++ TP_PROTO(key_t key, int msgflg),
++ TP_ARGS(key, msgflg),
++ TP_STRUCT__entry(__field(key_t, key) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(key, key) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_flock
++SC_TRACE_EVENT(sys_flock,
++ TP_PROTO(unsigned int fd, unsigned int cmd),
++ TP_ARGS(fd, cmd),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ftruncate
++SC_TRACE_EVENT(sys_ftruncate,
++ TP_PROTO(unsigned int fd, unsigned long length),
++ TP_ARGS(fd, length),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, length)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fchmod
++SC_TRACE_EVENT(sys_fchmod,
++ TP_PROTO(unsigned int fd, umode_t mode),
++ TP_ARGS(fd, mode),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setpgid
++SC_TRACE_EVENT(sys_setpgid,
++ TP_PROTO(pid_t pid, pid_t pgid),
++ TP_ARGS(pid, pgid),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, pgid)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(pgid, pgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setreuid
++SC_TRACE_EVENT(sys_setreuid,
++ TP_PROTO(uid_t ruid, uid_t euid),
++ TP_ARGS(ruid, euid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setregid
++SC_TRACE_EVENT(sys_setregid,
++ TP_PROTO(gid_t rgid, gid_t egid),
++ TP_ARGS(rgid, egid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getpriority
++SC_TRACE_EVENT(sys_getpriority,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mlock
++SC_TRACE_EVENT(sys_mlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_munlock
++SC_TRACE_EVENT(sys_munlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_tkill
++SC_TRACE_EVENT(sys_tkill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_inotify_rm_watch
++SC_TRACE_EVENT(sys_inotify_rm_watch,
++ TP_PROTO(int fd, __s32 wd),
++ TP_ARGS(fd, wd),
++ TP_STRUCT__entry(__field(int, fd) __field(__s32, wd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(wd, wd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ioprio_get
++SC_TRACE_EVENT(sys_ioprio_get,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timerfd_create
++SC_TRACE_EVENT(sys_timerfd_create,
++ TP_PROTO(int clockid, int flags),
++ TP_ARGS(clockid, flags),
++ TP_STRUCT__entry(__field(int, clockid) __field(int, flags)),
++ TP_fast_assign(tp_assign(clockid, clockid) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_eventfd2
++SC_TRACE_EVENT(sys_eventfd2,
++ TP_PROTO(unsigned int count, int flags),
++ TP_ARGS(count, flags),
++ TP_STRUCT__entry(__field(unsigned int, count) __field(int, flags)),
++ TP_fast_assign(tp_assign(count, count) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setns
++SC_TRACE_EVENT(sys_setns,
++ TP_PROTO(int fd, int nstype),
++ TP_ARGS(fd, nstype),
++ TP_STRUCT__entry(__field(int, fd) __field(int, nstype)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(nstype, nstype)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ioctl
++SC_TRACE_EVENT(sys_ioctl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fcntl
++SC_TRACE_EVENT(sys_fcntl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lseek
++SC_TRACE_EVENT(sys_lseek,
++ TP_PROTO(unsigned int fd, off_t offset, unsigned int origin),
++ TP_ARGS(fd, offset, origin),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(off_t, offset) __field(unsigned int, origin)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(origin, origin)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mprotect
++SC_TRACE_EVENT(sys_mprotect,
++ TP_PROTO(unsigned long start, size_t len, unsigned long prot),
++ TP_ARGS(start, len, prot),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(unsigned long, prot)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(prot, prot)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_msync
++SC_TRACE_EVENT(sys_msync,
++ TP_PROTO(unsigned long start, size_t len, int flags),
++ TP_ARGS(start, len, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(int, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_madvise
++SC_TRACE_EVENT(sys_madvise,
++ TP_PROTO(unsigned long start, size_t len_in, int behavior),
++ TP_ARGS(start, len_in, behavior),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len_in) __field(int, behavior)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len_in, len_in) tp_assign(behavior, behavior)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_shmget
++SC_TRACE_EVENT(sys_shmget,
++ TP_PROTO(key_t key, size_t size, int shmflg),
++ TP_ARGS(key, size, shmflg),
++ TP_STRUCT__entry(__field(key_t, key) __field(size_t, size) __field(int, shmflg)),
++ TP_fast_assign(tp_assign(key, key) tp_assign(size, size) tp_assign(shmflg, shmflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_socket
++SC_TRACE_EVENT(sys_socket,
++ TP_PROTO(int family, int type, int protocol),
++ TP_ARGS(family, type, protocol),
++ TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol)),
++ TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_semget
++SC_TRACE_EVENT(sys_semget,
++ TP_PROTO(key_t key, int nsems, int semflg),
++ TP_ARGS(key, nsems, semflg),
++ TP_STRUCT__entry(__field(key_t, key) __field(int, nsems) __field(int, semflg)),
++ TP_fast_assign(tp_assign(key, key) tp_assign(nsems, nsems) tp_assign(semflg, semflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fchown
++SC_TRACE_EVENT(sys_fchown,
++ TP_PROTO(unsigned int fd, uid_t user, gid_t group),
++ TP_ARGS(fd, user, group),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setresuid
++SC_TRACE_EVENT(sys_setresuid,
++ TP_PROTO(uid_t ruid, uid_t euid, uid_t suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid) __field(uid_t, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setresgid
++SC_TRACE_EVENT(sys_setresgid,
++ TP_PROTO(gid_t rgid, gid_t egid, gid_t sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid) __field(gid_t, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sysfs
++SC_TRACE_EVENT(sys_sysfs,
++ TP_PROTO(int option, unsigned long arg1, unsigned long arg2),
++ TP_ARGS(option, arg1, arg2),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg1) __field(unsigned long, arg2)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg1, arg1) tp_assign(arg2, arg2)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setpriority
++SC_TRACE_EVENT(sys_setpriority,
++ TP_PROTO(int which, int who, int niceval),
++ TP_ARGS(which, who, niceval),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, niceval)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(niceval, niceval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_cacheflush
++SC_TRACE_EVENT(sys_cacheflush,
++ TP_PROTO(unsigned long addr, unsigned long bytes, unsigned int cache),
++ TP_ARGS(addr, bytes, cache),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, bytes) __field(unsigned int, cache)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(bytes, bytes) tp_assign(cache, cache)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_tgkill
++SC_TRACE_EVENT(sys_tgkill,
++ TP_PROTO(pid_t tgid, pid_t pid, int sig),
++ TP_ARGS(tgid, pid, sig),
++ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ioprio_set
++SC_TRACE_EVENT(sys_ioprio_set,
++ TP_PROTO(int which, int who, int ioprio),
++ TP_ARGS(which, who, ioprio),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, ioprio)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(ioprio, ioprio)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_dup3
++SC_TRACE_EVENT(sys_dup3,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd, int flags),
++ TP_ARGS(oldfd, newfd, flags),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd) __field(int, flags)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_ftruncate64
++SC_TRACE_EVENT(sys_32_ftruncate64,
++ TP_PROTO(unsigned long fd, unsigned long __dummy, unsigned long a2, unsigned long a3),
++ TP_ARGS(fd, __dummy, a2, a3),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field(unsigned long, __dummy) __field(unsigned long, a2) __field(unsigned long, a3)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(__dummy, __dummy) tp_assign(a2, a2) tp_assign(a3, a3)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ptrace
++SC_TRACE_EVENT(sys_ptrace,
++ TP_PROTO(long request, long pid, unsigned long addr, unsigned long data),
++ TP_ARGS(request, pid, addr, data),
++ TP_STRUCT__entry(__field(long, request) __field(long, pid) __field_hex(unsigned long, addr) __field(unsigned long, data)),
++ TP_fast_assign(tp_assign(request, request) tp_assign(pid, pid) tp_assign(addr, addr) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_n32_semctl
++SC_TRACE_EVENT(sys_n32_semctl,
++ TP_PROTO(int semid, int semnum, int cmd, u32 arg),
++ TP_ARGS(semid, semnum, cmd, arg),
++ TP_STRUCT__entry(__field(int, semid) __field(int, semnum) __field(int, cmd) __field(u32, arg)),
++ TP_fast_assign(tp_assign(semid, semid) tp_assign(semnum, semnum) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_n32_msgsnd
++SC_TRACE_EVENT(sys_n32_msgsnd,
++ TP_PROTO(int msqid, u32 msgp, unsigned int msgsz, int msgflg),
++ TP_ARGS(msqid, msgp, msgsz, msgflg),
++ TP_STRUCT__entry(__field(int, msqid) __field(u32, msgp) __field(unsigned int, msgsz) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_tee
++SC_TRACE_EVENT(sys_tee,
++ TP_PROTO(int fdin, int fdout, size_t len, unsigned int flags),
++ TP_ARGS(fdin, fdout, len, flags),
++ TP_STRUCT__entry(__field(int, fdin) __field(int, fdout) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fdin, fdin) tp_assign(fdout, fdout) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mremap
++SC_TRACE_EVENT(sys_mremap,
++ TP_PROTO(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr),
++ TP_ARGS(addr, old_len, new_len, flags, new_addr),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, old_len) __field(unsigned long, new_len) __field(unsigned long, flags) __field_hex(unsigned long, new_addr)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(old_len, old_len) tp_assign(new_len, new_len) tp_assign(flags, flags) tp_assign(new_addr, new_addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_n32_msgrcv
++SC_TRACE_EVENT(sys_n32_msgrcv,
++ TP_PROTO(int msqid, u32 msgp, size_t msgsz, int msgtyp, int msgflg),
++ TP_ARGS(msqid, msgp, msgsz, msgtyp, msgflg),
++ TP_STRUCT__entry(__field(int, msqid) __field(u32, msgp) __field(size_t, msgsz) __field(int, msgtyp) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgtyp, msgtyp) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_prctl
++SC_TRACE_EVENT(sys_prctl,
++ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
++ TP_ARGS(option, arg2, arg3, arg4, arg5),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_remap_file_pages
++SC_TRACE_EVENT(sys_remap_file_pages,
++ TP_PROTO(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags),
++ TP_ARGS(start, size, prot, pgoff, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(unsigned long, size) __field(unsigned long, prot) __field(unsigned long, pgoff) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(size, size) tp_assign(prot, prot) tp_assign(pgoff, pgoff) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_ipc
++SC_TRACE_EVENT(sys_32_ipc,
++ TP_PROTO(u32 call, long first, long second, long third, unsigned long ptr, unsigned long fifth),
++ TP_ARGS(call, first, second, third, ptr, fifth),
++ TP_STRUCT__entry(__field(u32, call) __field(long, first) __field(long, second) __field(long, third) __field_hex(unsigned long, ptr) __field(unsigned long, fifth)),
++ TP_fast_assign(tp_assign(call, call) tp_assign(first, first) tp_assign(second, second) tp_assign(third, third) tp_assign(ptr, ptr) tp_assign(fifth, fifth)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mips_mmap2
++SC_TRACE_EVENT(sys_mips_mmap2,
++ TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff),
++ TP_ARGS(addr, len, prot, flags, fd, pgoff),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, len) __field(unsigned long, prot) __field(unsigned long, flags) __field(unsigned long, fd) __field(unsigned long, pgoff)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(pgoff, pgoff)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mips_mmap
++SC_TRACE_EVENT(sys_mips_mmap,
++ TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, off_t offset),
++ TP_ARGS(addr, len, prot, flags, fd, offset),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, len) __field(unsigned long, prot) __field(unsigned long, flags) __field(unsigned long, fd) __field(off_t, offset)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(offset, offset)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_INTEGERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "mips-64-syscalls-3.5.0_integers_override.h"
++#include "syscalls_integers_override.h"
++
++#ifndef OVERRIDE_TABLE_64_sys_sgetmask
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sgetmask, 4068, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_yield
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sched_yield, 6023, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pause
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_pause, 6033, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getpid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpid, 6038, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid, 6100, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getgid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid, 6102, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_geteuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid, 6105, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getegid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid, 6106, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getppid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getppid, 6108, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getpgrp
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpgrp, 6109, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setsid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_setsid, 6110, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_munlockall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_munlockall, 6149, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_vhangup
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_vhangup, 6150, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sync
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sync, 6157, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_gettid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_gettid, 6178, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_restart_syscall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_restart_syscall, 6214, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_inotify_init
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_inotify_init, 6247, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_nice
++TRACE_SYSCALL_TABLE(sys_nice, sys_nice, 4034, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ssetmask
++TRACE_SYSCALL_TABLE(sys_ssetmask, sys_ssetmask, 4069, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_ipc
++TRACE_SYSCALL_TABLE(sys_32_ipc, sys_32_ipc, 4117, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_bdflush
++TRACE_SYSCALL_TABLE(sys_bdflush, sys_bdflush, 4134, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mips_mmap2
++TRACE_SYSCALL_TABLE(sys_mips_mmap2, sys_mips_mmap2, 4210, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_ftruncate64
++TRACE_SYSCALL_TABLE(sys_32_ftruncate64, sys_32_ftruncate64, 4212, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ioctl
++TRACE_SYSCALL_TABLE(sys_ioctl, sys_ioctl, 5015, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fcntl
++TRACE_SYSCALL_TABLE(sys_fcntl, sys_fcntl, 5070, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ptrace
++TRACE_SYSCALL_TABLE(sys_ptrace, sys_ptrace, 5099, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_personality
++TRACE_SYSCALL_TABLE(sys_personality, sys_personality, 5132, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_close
++TRACE_SYSCALL_TABLE(sys_close, sys_close, 6003, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lseek
++TRACE_SYSCALL_TABLE(sys_lseek, sys_lseek, 6008, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mips_mmap
++TRACE_SYSCALL_TABLE(sys_mips_mmap, sys_mips_mmap, 6009, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mprotect
++TRACE_SYSCALL_TABLE(sys_mprotect, sys_mprotect, 6010, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_munmap
++TRACE_SYSCALL_TABLE(sys_munmap, sys_munmap, 6011, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_brk
++TRACE_SYSCALL_TABLE(sys_brk, sys_brk, 6012, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mremap
++TRACE_SYSCALL_TABLE(sys_mremap, sys_mremap, 6024, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_msync
++TRACE_SYSCALL_TABLE(sys_msync, sys_msync, 6025, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_madvise
++TRACE_SYSCALL_TABLE(sys_madvise, sys_madvise, 6027, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_shmget
++TRACE_SYSCALL_TABLE(sys_shmget, sys_shmget, 6028, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_dup
++TRACE_SYSCALL_TABLE(sys_dup, sys_dup, 6031, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_dup2
++TRACE_SYSCALL_TABLE(sys_dup2, sys_dup2, 6032, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_alarm
++TRACE_SYSCALL_TABLE(sys_alarm, sys_alarm, 6037, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_socket
++TRACE_SYSCALL_TABLE(sys_socket, sys_socket, 6040, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_shutdown
++TRACE_SYSCALL_TABLE(sys_shutdown, sys_shutdown, 6047, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_listen
++TRACE_SYSCALL_TABLE(sys_listen, sys_listen, 6049, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_exit
++TRACE_SYSCALL_TABLE(sys_exit, sys_exit, 6058, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_kill
++TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 6060, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_semget
++TRACE_SYSCALL_TABLE(sys_semget, sys_semget, 6062, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_n32_semctl
++TRACE_SYSCALL_TABLE(sys_n32_semctl, sys_n32_semctl, 6064, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_msgget
++TRACE_SYSCALL_TABLE(sys_msgget, sys_msgget, 6066, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_n32_msgsnd
++TRACE_SYSCALL_TABLE(sys_n32_msgsnd, sys_n32_msgsnd, 6067, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_n32_msgrcv
++TRACE_SYSCALL_TABLE(sys_n32_msgrcv, sys_n32_msgrcv, 6068, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_flock
++TRACE_SYSCALL_TABLE(sys_flock, sys_flock, 6071, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fsync
++TRACE_SYSCALL_TABLE(sys_fsync, sys_fsync, 6072, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fdatasync
++TRACE_SYSCALL_TABLE(sys_fdatasync, sys_fdatasync, 6073, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ftruncate
++TRACE_SYSCALL_TABLE(sys_ftruncate, sys_ftruncate, 6075, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fchdir
++TRACE_SYSCALL_TABLE(sys_fchdir, sys_fchdir, 6079, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fchmod
++TRACE_SYSCALL_TABLE(sys_fchmod, sys_fchmod, 6089, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fchown
++TRACE_SYSCALL_TABLE(sys_fchown, sys_fchown, 6091, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_umask
++TRACE_SYSCALL_TABLE(sys_umask, sys_umask, 6093, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setuid
++TRACE_SYSCALL_TABLE(sys_setuid, sys_setuid, 6103, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setgid
++TRACE_SYSCALL_TABLE(sys_setgid, sys_setgid, 6104, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setpgid
++TRACE_SYSCALL_TABLE(sys_setpgid, sys_setpgid, 6107, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setreuid
++TRACE_SYSCALL_TABLE(sys_setreuid, sys_setreuid, 6111, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setregid
++TRACE_SYSCALL_TABLE(sys_setregid, sys_setregid, 6112, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setresuid
++TRACE_SYSCALL_TABLE(sys_setresuid, sys_setresuid, 6115, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setresgid
++TRACE_SYSCALL_TABLE(sys_setresgid, sys_setresgid, 6117, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getpgid
++TRACE_SYSCALL_TABLE(sys_getpgid, sys_getpgid, 6119, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setfsuid
++TRACE_SYSCALL_TABLE(sys_setfsuid, sys_setfsuid, 6120, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setfsgid
++TRACE_SYSCALL_TABLE(sys_setfsgid, sys_setfsgid, 6121, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getsid
++TRACE_SYSCALL_TABLE(sys_getsid, sys_getsid, 6122, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_personality
++TRACE_SYSCALL_TABLE(sys_32_personality, sys_32_personality, 6132, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sysfs
++TRACE_SYSCALL_TABLE(sys_sysfs, sys_sysfs, 6136, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getpriority
++TRACE_SYSCALL_TABLE(sys_getpriority, sys_getpriority, 6137, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setpriority
++TRACE_SYSCALL_TABLE(sys_setpriority, sys_setpriority, 6138, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_getscheduler
++TRACE_SYSCALL_TABLE(sys_sched_getscheduler, sys_sched_getscheduler, 6142, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_get_priority_max
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_max, sys_sched_get_priority_max, 6143, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_get_priority_min
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_min, sys_sched_get_priority_min, 6144, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mlock
++TRACE_SYSCALL_TABLE(sys_mlock, sys_mlock, 6146, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_munlock
++TRACE_SYSCALL_TABLE(sys_munlock, sys_munlock, 6147, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mlockall
++TRACE_SYSCALL_TABLE(sys_mlockall, sys_mlockall, 6148, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_prctl
++TRACE_SYSCALL_TABLE(sys_prctl, sys_prctl, 6153, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_tkill
++TRACE_SYSCALL_TABLE(sys_tkill, sys_tkill, 6192, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_cacheflush
++TRACE_SYSCALL_TABLE(sys_cacheflush, sys_cacheflush, 6197, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_io_destroy
++TRACE_SYSCALL_TABLE(sys_io_destroy, sys_io_destroy, 6201, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_exit_group
++TRACE_SYSCALL_TABLE(sys_exit_group, sys_exit_group, 6205, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_epoll_create
++TRACE_SYSCALL_TABLE(sys_epoll_create, sys_epoll_create, 6207, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_remap_file_pages
++TRACE_SYSCALL_TABLE(sys_remap_file_pages, sys_remap_file_pages, 6210, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timer_getoverrun
++TRACE_SYSCALL_TABLE(sys_timer_getoverrun, sys_timer_getoverrun, 6223, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timer_delete
++TRACE_SYSCALL_TABLE(sys_timer_delete, sys_timer_delete, 6224, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_tgkill
++TRACE_SYSCALL_TABLE(sys_tgkill, sys_tgkill, 6229, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_set_thread_area
++TRACE_SYSCALL_TABLE(sys_set_thread_area, sys_set_thread_area, 6246, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_inotify_rm_watch
++TRACE_SYSCALL_TABLE(sys_inotify_rm_watch, sys_inotify_rm_watch, 6249, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_unshare
++TRACE_SYSCALL_TABLE(sys_unshare, sys_unshare, 6266, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_tee
++TRACE_SYSCALL_TABLE(sys_tee, sys_tee, 6269, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ioprio_set
++TRACE_SYSCALL_TABLE(sys_ioprio_set, sys_ioprio_set, 6277, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ioprio_get
++TRACE_SYSCALL_TABLE(sys_ioprio_get, sys_ioprio_get, 6278, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_eventfd
++TRACE_SYSCALL_TABLE(sys_eventfd, sys_eventfd, 6282, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timerfd_create
++TRACE_SYSCALL_TABLE(sys_timerfd_create, sys_timerfd_create, 6284, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_eventfd2
++TRACE_SYSCALL_TABLE(sys_eventfd2, sys_eventfd2, 6288, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_epoll_create1
++TRACE_SYSCALL_TABLE(sys_epoll_create1, sys_epoll_create1, 6289, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_dup3
++TRACE_SYSCALL_TABLE(sys_dup3, sys_dup3, 6290, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_inotify_init1
++TRACE_SYSCALL_TABLE(sys_inotify_init1, sys_inotify_init1, 6292, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_syncfs
++TRACE_SYSCALL_TABLE(sys_syncfs, sys_syncfs, 6306, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setns
++TRACE_SYSCALL_TABLE(sys_setns, sys_setns, 6308, 2)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/mips-64-syscalls-3.5.0_integers_override.h
+@@ -0,0 +1,3 @@
++/*
++ * this is a place-holder for MIPS integer syscall definition override.
++ */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/mips-64-syscalls-3.5.0_pointers.h
+@@ -0,0 +1,2232 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_POINTERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_POINTERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "mips-64-syscalls-3.5.0_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_64_sys_oldumount
++SC_TRACE_EVENT(sys_oldumount,
++ TP_PROTO(char * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_olduname
++SC_TRACE_EVENT(sys_olduname,
++ TP_PROTO(struct oldold_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct oldold_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_uselib
++SC_TRACE_EVENT(sys_uselib,
++ TP_PROTO(const char * library),
++ TP_ARGS(library),
++ TP_STRUCT__entry(__field_hex(const char *, library)),
++ TP_fast_assign(tp_assign(library, library)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_uname
++SC_TRACE_EVENT(sys_uname,
++ TP_PROTO(struct old_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct old_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sysinfo
++SC_TRACE_EVENT(sys_sysinfo,
++ TP_PROTO(struct sysinfo * info),
++ TP_ARGS(info),
++ TP_STRUCT__entry(__field_hex(struct sysinfo *, info)),
++ TP_fast_assign(tp_assign(info, info)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_times
++SC_TRACE_EVENT(sys_times,
++ TP_PROTO(struct tms * tbuf),
++ TP_ARGS(tbuf),
++ TP_STRUCT__entry(__field_hex(struct tms *, tbuf)),
++ TP_fast_assign(tp_assign(tbuf, tbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sysctl
++SC_TRACE_EVENT(sys_sysctl,
++ TP_PROTO(struct __sysctl_args * args),
++ TP_ARGS(args),
++ TP_STRUCT__entry(__field_hex(struct __sysctl_args *, args)),
++ TP_fast_assign(tp_assign(args, args)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_adjtimex
++SC_TRACE_EVENT(sys_adjtimex,
++ TP_PROTO(struct timex * txc_p),
++ TP_ARGS(txc_p),
++ TP_STRUCT__entry(__field_hex(struct timex *, txc_p)),
++ TP_fast_assign(tp_assign(txc_p, txc_p)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_newuname
++SC_TRACE_EVENT(sys_newuname,
++ TP_PROTO(struct new_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct new_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_shmdt
++SC_TRACE_EVENT(sys_shmdt,
++ TP_PROTO(char * shmaddr),
++ TP_ARGS(shmaddr),
++ TP_STRUCT__entry(__field_hex(char *, shmaddr)),
++ TP_fast_assign(tp_assign(shmaddr, shmaddr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_chdir
++SC_TRACE_EVENT(sys_chdir,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rmdir
++SC_TRACE_EVENT(sys_rmdir,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_unlink
++SC_TRACE_EVENT(sys_unlink,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_chroot
++SC_TRACE_EVENT(sys_chroot,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_swapoff
++SC_TRACE_EVENT(sys_swapoff,
++ TP_PROTO(const char * specialfile),
++ TP_ARGS(specialfile),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_set_tid_address
++SC_TRACE_EVENT(sys_set_tid_address,
++ TP_PROTO(int * tidptr),
++ TP_ARGS(tidptr),
++ TP_STRUCT__entry(__field_hex(int *, tidptr)),
++ TP_fast_assign(tp_assign(tidptr, tidptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_nanosleep
++SC_TRACE_EVENT(sys_nanosleep,
++ TP_PROTO(struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(rqtp, rmtp),
++ TP_STRUCT__entry(__field_hex(struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getitimer
++SC_TRACE_EVENT(sys_getitimer,
++ TP_PROTO(int which, struct itimerval * value),
++ TP_ARGS(which, value),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_gettimeofday
++SC_TRACE_EVENT(sys_gettimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getrlimit
++SC_TRACE_EVENT(sys_getrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getrusage
++SC_TRACE_EVENT(sys_getrusage,
++ TP_PROTO(int who, struct rusage * ru),
++ TP_ARGS(who, ru),
++ TP_STRUCT__entry(__field(int, who) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(who, who) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigpending
++SC_TRACE_EVENT(sys_rt_sigpending,
++ TP_PROTO(sigset_t * set, size_t sigsetsize),
++ TP_ARGS(set, sigsetsize),
++ TP_STRUCT__entry(__field_hex(sigset_t *, set) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(set, set) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_utime
++SC_TRACE_EVENT(sys_utime,
++ TP_PROTO(char * filename, struct utimbuf * times),
++ TP_ARGS(filename, times),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct utimbuf *, times)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(times, times)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ustat
++SC_TRACE_EVENT(sys_ustat,
++ TP_PROTO(unsigned dev, struct ustat * ubuf),
++ TP_ARGS(dev, ubuf),
++ TP_STRUCT__entry(__field(unsigned, dev) __field_hex(struct ustat *, ubuf)),
++ TP_fast_assign(tp_assign(dev, dev) tp_assign(ubuf, ubuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_statfs
++SC_TRACE_EVENT(sys_statfs,
++ TP_PROTO(const char * pathname, struct statfs * buf),
++ TP_ARGS(pathname, buf),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fstatfs
++SC_TRACE_EVENT(sys_fstatfs,
++ TP_PROTO(unsigned int fd, struct statfs * buf),
++ TP_ARGS(fd, buf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_rr_get_interval
++SC_TRACE_EVENT(sys_sched_rr_get_interval,
++ TP_PROTO(pid_t pid, struct timespec * interval),
++ TP_ARGS(pid, interval),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct timespec *, interval)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setrlimit
++SC_TRACE_EVENT(sys_setrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_settimeofday
++SC_TRACE_EVENT(sys_settimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_io_setup
++SC_TRACE_EVENT(sys_io_setup,
++ TP_PROTO(unsigned nr_events, aio_context_t * ctxp),
++ TP_ARGS(nr_events, ctxp),
++ TP_STRUCT__entry(__field(unsigned, nr_events) __field_hex(aio_context_t *, ctxp)),
++ TP_fast_assign(tp_assign(nr_events, nr_events) tp_assign(ctxp, ctxp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timer_gettime
++SC_TRACE_EVENT(sys_timer_gettime,
++ TP_PROTO(timer_t timer_id, struct itimerspec * setting),
++ TP_ARGS(timer_id, setting),
++ TP_STRUCT__entry(__field(timer_t, timer_id) __field_hex(struct itimerspec *, setting)),
++ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(setting, setting)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_clock_settime
++SC_TRACE_EVENT(sys_clock_settime,
++ TP_PROTO(const clockid_t which_clock, const struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(const struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_clock_gettime
++SC_TRACE_EVENT(sys_clock_gettime,
++ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_clock_getres
++SC_TRACE_EVENT(sys_clock_getres,
++ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_utimes
++SC_TRACE_EVENT(sys_utimes,
++ TP_PROTO(char * filename, struct timeval * utimes),
++ TP_ARGS(filename, utimes),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_set_robust_list
++SC_TRACE_EVENT(sys_set_robust_list,
++ TP_PROTO(struct robust_list_head * head, size_t len),
++ TP_ARGS(head, len),
++ TP_STRUCT__entry(__field_hex(struct robust_list_head *, head) __field(size_t, len)),
++ TP_fast_assign(tp_assign(head, head) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timerfd_gettime
++SC_TRACE_EVENT(sys_timerfd_gettime,
++ TP_PROTO(int ufd, struct itimerspec * otmr),
++ TP_ARGS(ufd, otmr),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(struct itimerspec *, otmr)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(otmr, otmr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_clock_adjtime
++SC_TRACE_EVENT(sys_clock_adjtime,
++ TP_PROTO(const clockid_t which_clock, struct timex * utx),
++ TP_ARGS(which_clock, utx),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timex *, utx)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(utx, utx)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_newstat
++SC_TRACE_EVENT(sys_newstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_newfstat
++SC_TRACE_EVENT(sys_newfstat,
++ TP_PROTO(unsigned int fd, struct stat * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_newlstat
++SC_TRACE_EVENT(sys_newlstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_access
++SC_TRACE_EVENT(sys_access,
++ TP_PROTO(const char * filename, int mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_truncate
++SC_TRACE_EVENT(sys_truncate,
++ TP_PROTO(const char * path, long length),
++ TP_ARGS(path, length),
++ TP_STRUCT__entry(__string_from_user(path, path) __field(long, length)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getcwd
++SC_TRACE_EVENT(sys_getcwd,
++ TP_PROTO(char * buf, unsigned long size),
++ TP_ARGS(buf, size),
++ TP_STRUCT__entry(__field_hex(char *, buf) __field(unsigned long, size)),
++ TP_fast_assign(tp_assign(buf, buf) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rename
++SC_TRACE_EVENT(sys_rename,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mkdir
++SC_TRACE_EVENT(sys_mkdir,
++ TP_PROTO(const char * pathname, umode_t mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_creat
++SC_TRACE_EVENT(sys_creat,
++ TP_PROTO(const char * pathname, umode_t mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_link
++SC_TRACE_EVENT(sys_link,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_symlink
++SC_TRACE_EVENT(sys_symlink,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_chmod
++SC_TRACE_EVENT(sys_chmod,
++ TP_PROTO(const char * filename, umode_t mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getgroups
++SC_TRACE_EVENT(sys_getgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setgroups
++SC_TRACE_EVENT(sys_setgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_rt_sigpending
++SC_TRACE_EVENT(sys_32_rt_sigpending,
++ TP_PROTO(compat_sigset_t * uset, unsigned int sigsetsize),
++ TP_ARGS(uset, sigsetsize),
++ TP_STRUCT__entry(__field_hex(compat_sigset_t *, uset) __field(unsigned int, sigsetsize)),
++ TP_fast_assign(tp_assign(uset, uset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_setparam
++SC_TRACE_EVENT(sys_sched_setparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_getparam
++SC_TRACE_EVENT(sys_sched_getparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_sched_rr_get_interval
++SC_TRACE_EVENT(sys_32_sched_rr_get_interval,
++ TP_PROTO(compat_pid_t pid, struct compat_timespec * interval),
++ TP_ARGS(pid, interval),
++ TP_STRUCT__entry(__field(compat_pid_t, pid) __field_hex(struct compat_timespec *, interval)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_pivot_root
++SC_TRACE_EVENT(sys_pivot_root,
++ TP_PROTO(const char * new_root, const char * put_old),
++ TP_ARGS(new_root, put_old),
++ TP_STRUCT__entry(__string_from_user(new_root, new_root) __string_from_user(put_old, put_old)),
++ TP_fast_assign(tp_copy_string_from_user(new_root, new_root) tp_copy_string_from_user(put_old, put_old)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_umount
++SC_TRACE_EVENT(sys_umount,
++ TP_PROTO(char * name, int flags),
++ TP_ARGS(name, flags),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_swapon
++SC_TRACE_EVENT(sys_swapon,
++ TP_PROTO(const char * specialfile, int swap_flags),
++ TP_ARGS(specialfile, swap_flags),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile) __field(int, swap_flags)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile) tp_assign(swap_flags, swap_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sethostname
++SC_TRACE_EVENT(sys_sethostname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setdomainname
++SC_TRACE_EVENT(sys_setdomainname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_delete_module
++SC_TRACE_EVENT(sys_delete_module,
++ TP_PROTO(const char * name_user, unsigned int flags),
++ TP_ARGS(name_user, flags),
++ TP_STRUCT__entry(__string_from_user(name_user, name_user) __field(unsigned int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name_user, name_user) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_removexattr
++SC_TRACE_EVENT(sys_removexattr,
++ TP_PROTO(const char * pathname, const char * name),
++ TP_ARGS(pathname, name),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lremovexattr
++SC_TRACE_EVENT(sys_lremovexattr,
++ TP_PROTO(const char * pathname, const char * name),
++ TP_ARGS(pathname, name),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fremovexattr
++SC_TRACE_EVENT(sys_fremovexattr,
++ TP_PROTO(int fd, const char * name),
++ TP_ARGS(fd, name),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_pipe2
++SC_TRACE_EVENT(sys_pipe2,
++ TP_PROTO(int * fildes, int flags),
++ TP_ARGS(fildes, flags),
++ TP_STRUCT__entry(__field_hex(int *, fildes) __field(int, flags)),
++ TP_fast_assign(tp_assign(fildes, fildes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_waitpid
++SC_TRACE_EVENT(sys_waitpid,
++ TP_PROTO(pid_t pid, int * stat_addr, int options),
++ TP_ARGS(pid, stat_addr, options),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(int *, stat_addr) __field(int, options)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(stat_addr, stat_addr) tp_assign(options, options)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_sigaction
++SC_TRACE_EVENT(sys_32_sigaction,
++ TP_PROTO(long sig, const struct sigaction32 * act, struct sigaction32 * oact),
++ TP_ARGS(sig, act, oact),
++ TP_STRUCT__entry(__field(long, sig) __field_hex(const struct sigaction32 *, act) __field_hex(struct sigaction32 *, oact)),
++ TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_readv
++SC_TRACE_EVENT(sys_readv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_writev
++SC_TRACE_EVENT(sys_writev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_shmctl
++SC_TRACE_EVENT(sys_shmctl,
++ TP_PROTO(int shmid, int cmd, struct shmid_ds * buf),
++ TP_ARGS(shmid, cmd, buf),
++ TP_STRUCT__entry(__field(int, shmid) __field(int, cmd) __field_hex(struct shmid_ds *, buf)),
++ TP_fast_assign(tp_assign(shmid, shmid) tp_assign(cmd, cmd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setitimer
++SC_TRACE_EVENT(sys_setitimer,
++ TP_PROTO(int which, struct itimerval * value, struct itimerval * ovalue),
++ TP_ARGS(which, value, ovalue),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value) __field_hex(struct itimerval *, ovalue)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value) tp_assign(ovalue, ovalue)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sendmsg
++SC_TRACE_EVENT(sys_sendmsg,
++ TP_PROTO(int fd, struct msghdr * msg, unsigned int flags),
++ TP_ARGS(fd, msg, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_recvmsg
++SC_TRACE_EVENT(sys_recvmsg,
++ TP_PROTO(int fd, struct msghdr * msg, unsigned int flags),
++ TP_ARGS(fd, msg, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_msgctl
++SC_TRACE_EVENT(sys_msgctl,
++ TP_PROTO(int msqid, int cmd, struct msqid_ds * buf),
++ TP_ARGS(msqid, cmd, buf),
++ TP_STRUCT__entry(__field(int, msqid) __field(int, cmd) __field_hex(struct msqid_ds *, buf)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(cmd, cmd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getdents
++SC_TRACE_EVENT(sys_getdents,
++ TP_PROTO(unsigned int fd, struct linux_dirent * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigqueueinfo
++SC_TRACE_EVENT(sys_rt_sigqueueinfo,
++ TP_PROTO(pid_t pid, int sig, siginfo_t * uinfo),
++ TP_ARGS(pid, sig, uinfo),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_setaffinity
++SC_TRACE_EVENT(sys_sched_setaffinity,
++ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
++ TP_ARGS(pid, len, user_mask_ptr),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_getaffinity
++SC_TRACE_EVENT(sys_sched_getaffinity,
++ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
++ TP_ARGS(pid, len, user_mask_ptr),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_io_submit
++SC_TRACE_EVENT(sys_io_submit,
++ TP_PROTO(aio_context_t ctx_id, long nr, struct iocb * * iocbpp),
++ TP_ARGS(ctx_id, nr, iocbpp),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, nr) __field_hex(struct iocb * *, iocbpp)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(nr, nr) tp_assign(iocbpp, iocbpp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timer_create
++SC_TRACE_EVENT(sys_timer_create,
++ TP_PROTO(const clockid_t which_clock, struct sigevent * timer_event_spec, timer_t * created_timer_id),
++ TP_ARGS(which_clock, timer_event_spec, created_timer_id),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct sigevent *, timer_event_spec) __field_hex(timer_t *, created_timer_id)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(timer_event_spec, timer_event_spec) tp_assign(created_timer_id, created_timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_futimesat
++SC_TRACE_EVENT(sys_futimesat,
++ TP_PROTO(int dfd, const char * filename, struct timeval * utimes),
++ TP_ARGS(dfd, filename, utimes),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_get_robust_list
++SC_TRACE_EVENT(sys_get_robust_list,
++ TP_PROTO(int pid, struct robust_list_head * * head_ptr, size_t * len_ptr),
++ TP_ARGS(pid, head_ptr, len_ptr),
++ TP_STRUCT__entry(__field(int, pid) __field_hex(struct robust_list_head * *, head_ptr) __field_hex(size_t *, len_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(head_ptr, head_ptr) tp_assign(len_ptr, len_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_signalfd
++SC_TRACE_EVENT(sys_signalfd,
++ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask),
++ TP_ARGS(ufd, user_mask, sizemask),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_read
++SC_TRACE_EVENT(sys_read,
++ TP_PROTO(unsigned int fd, char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_write
++SC_TRACE_EVENT(sys_write,
++ TP_PROTO(unsigned int fd, const char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_open
++SC_TRACE_EVENT(sys_open,
++ TP_PROTO(const char * filename, int flags, umode_t mode),
++ TP_ARGS(filename, flags, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, flags) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_poll
++SC_TRACE_EVENT(sys_poll,
++ TP_PROTO(struct pollfd * ufds, unsigned int nfds, int timeout_msecs),
++ TP_ARGS(ufds, nfds, timeout_msecs),
++ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field(int, timeout_msecs)),
++ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(timeout_msecs, timeout_msecs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mincore
++SC_TRACE_EVENT(sys_mincore,
++ TP_PROTO(unsigned long start, size_t len, unsigned char * vec),
++ TP_ARGS(start, len, vec),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field_hex(unsigned char *, vec)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(vec, vec)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_shmat
++SC_TRACE_EVENT(sys_shmat,
++ TP_PROTO(int shmid, char * shmaddr, int shmflg),
++ TP_ARGS(shmid, shmaddr, shmflg),
++ TP_STRUCT__entry(__field(int, shmid) __field_hex(char *, shmaddr) __field(int, shmflg)),
++ TP_fast_assign(tp_assign(shmid, shmid) tp_assign(shmaddr, shmaddr) tp_assign(shmflg, shmflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_connect
++SC_TRACE_EVENT(sys_connect,
++ TP_PROTO(int fd, struct sockaddr * uservaddr, int addrlen),
++ TP_ARGS(fd, uservaddr, addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, uservaddr) __field_hex(int, addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(uservaddr, uservaddr) tp_assign(addrlen, addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_accept
++SC_TRACE_EVENT(sys_accept,
++ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen),
++ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_bind
++SC_TRACE_EVENT(sys_bind,
++ TP_PROTO(int fd, struct sockaddr * umyaddr, int addrlen),
++ TP_ARGS(fd, umyaddr, addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, umyaddr) __field_hex(int, addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(umyaddr, umyaddr) tp_assign(addrlen, addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getsockname
++SC_TRACE_EVENT(sys_getsockname,
++ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
++ TP_ARGS(fd, usockaddr, usockaddr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getpeername
++SC_TRACE_EVENT(sys_getpeername,
++ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
++ TP_ARGS(fd, usockaddr, usockaddr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_semop
++SC_TRACE_EVENT(sys_semop,
++ TP_PROTO(int semid, struct sembuf * tsops, unsigned nsops),
++ TP_ARGS(semid, tsops, nsops),
++ TP_STRUCT__entry(__field(int, semid) __field_hex(struct sembuf *, tsops) __field(unsigned, nsops)),
++ TP_fast_assign(tp_assign(semid, semid) tp_assign(tsops, tsops) tp_assign(nsops, nsops)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_readlink
++SC_TRACE_EVENT(sys_readlink,
++ TP_PROTO(const char * path, char * buf, int bufsiz),
++ TP_ARGS(path, buf, bufsiz),
++ TP_STRUCT__entry(__string_from_user(path, path) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_chown
++SC_TRACE_EVENT(sys_chown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lchown
++SC_TRACE_EVENT(sys_lchown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_syslog
++SC_TRACE_EVENT(sys_syslog,
++ TP_PROTO(int type, char * buf, int len),
++ TP_ARGS(type, buf, len),
++ TP_STRUCT__entry(__field(int, type) __field_hex(char *, buf) __field(int, len)),
++ TP_fast_assign(tp_assign(type, type) tp_assign(buf, buf) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getresuid
++SC_TRACE_EVENT(sys_getresuid,
++ TP_PROTO(uid_t * ruidp, uid_t * euidp, uid_t * suidp),
++ TP_ARGS(ruidp, euidp, suidp),
++ TP_STRUCT__entry(__field_hex(uid_t *, ruidp) __field_hex(uid_t *, euidp) __field_hex(uid_t *, suidp)),
++ TP_fast_assign(tp_assign(ruidp, ruidp) tp_assign(euidp, euidp) tp_assign(suidp, suidp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getresgid
++SC_TRACE_EVENT(sys_getresgid,
++ TP_PROTO(gid_t * rgidp, gid_t * egidp, gid_t * sgidp),
++ TP_ARGS(rgidp, egidp, sgidp),
++ TP_STRUCT__entry(__field_hex(gid_t *, rgidp) __field_hex(gid_t *, egidp) __field_hex(gid_t *, sgidp)),
++ TP_fast_assign(tp_assign(rgidp, rgidp) tp_assign(egidp, egidp) tp_assign(sgidp, sgidp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_rt_sigqueueinfo
++SC_TRACE_EVENT(sys_32_rt_sigqueueinfo,
++ TP_PROTO(int pid, int sig, compat_siginfo_t * uinfo),
++ TP_ARGS(pid, sig, uinfo),
++ TP_STRUCT__entry(__field(int, pid) __field(int, sig) __field_hex(compat_siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mknod
++SC_TRACE_EVENT(sys_mknod,
++ TP_PROTO(const char * filename, umode_t mode, unsigned dev),
++ TP_ARGS(filename, mode, dev),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(umode_t, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_setscheduler
++SC_TRACE_EVENT(sys_sched_setscheduler,
++ TP_PROTO(pid_t pid, int policy, struct sched_param * param),
++ TP_ARGS(pid, policy, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, policy) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(policy, policy) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_init_module
++SC_TRACE_EVENT(sys_init_module,
++ TP_PROTO(void * umod, unsigned long len, const char * uargs),
++ TP_ARGS(umod, len, uargs),
++ TP_STRUCT__entry(__field_hex(void *, umod) __field(unsigned long, len) __field_hex(const char *, uargs)),
++ TP_fast_assign(tp_assign(umod, umod) tp_assign(len, len) tp_assign(uargs, uargs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_listxattr
++SC_TRACE_EVENT(sys_listxattr,
++ TP_PROTO(const char * pathname, char * list, size_t size),
++ TP_ARGS(pathname, list, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_llistxattr
++SC_TRACE_EVENT(sys_llistxattr,
++ TP_PROTO(const char * pathname, char * list, size_t size),
++ TP_ARGS(pathname, list, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_flistxattr
++SC_TRACE_EVENT(sys_flistxattr,
++ TP_PROTO(int fd, char * list, size_t size),
++ TP_ARGS(fd, list, size),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_cachectl
++SC_TRACE_EVENT(sys_cachectl,
++ TP_PROTO(char * addr, int nbytes, int op),
++ TP_ARGS(addr, nbytes, op),
++ TP_STRUCT__entry(__field_hex(char *, addr) __field(int, nbytes) __field(int, op)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(nbytes, nbytes) tp_assign(op, op)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_io_cancel
++SC_TRACE_EVENT(sys_io_cancel,
++ TP_PROTO(aio_context_t ctx_id, struct iocb * iocb, struct io_event * result),
++ TP_ARGS(ctx_id, iocb, result),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field_hex(struct iocb *, iocb) __field_hex(struct io_event *, result)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(iocb, iocb) tp_assign(result, result)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_inotify_add_watch
++SC_TRACE_EVENT(sys_inotify_add_watch,
++ TP_PROTO(int fd, const char * pathname, u32 mask),
++ TP_ARGS(fd, pathname, mask),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(pathname, pathname) __field(u32, mask)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(pathname, pathname) tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mkdirat
++SC_TRACE_EVENT(sys_mkdirat,
++ TP_PROTO(int dfd, const char * pathname, umode_t mode),
++ TP_ARGS(dfd, pathname, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_unlinkat
++SC_TRACE_EVENT(sys_unlinkat,
++ TP_PROTO(int dfd, const char * pathname, int flag),
++ TP_ARGS(dfd, pathname, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_symlinkat
++SC_TRACE_EVENT(sys_symlinkat,
++ TP_PROTO(const char * oldname, int newdfd, const char * newname),
++ TP_ARGS(oldname, newdfd, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fchmodat
++SC_TRACE_EVENT(sys_fchmodat,
++ TP_PROTO(int dfd, const char * filename, umode_t mode),
++ TP_ARGS(dfd, filename, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_faccessat
++SC_TRACE_EVENT(sys_faccessat,
++ TP_PROTO(int dfd, const char * filename, int mode),
++ TP_ARGS(dfd, filename, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getcpu
++SC_TRACE_EVENT(sys_getcpu,
++ TP_PROTO(unsigned * cpup, unsigned * nodep, struct getcpu_cache * unused),
++ TP_ARGS(cpup, nodep, unused),
++ TP_STRUCT__entry(__field_hex(unsigned *, cpup) __field_hex(unsigned *, nodep) __field_hex(struct getcpu_cache *, unused)),
++ TP_fast_assign(tp_assign(cpup, cpup) tp_assign(nodep, nodep) tp_assign(unused, unused)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getdents64
++SC_TRACE_EVENT(sys_getdents64,
++ TP_PROTO(unsigned int fd, struct linux_dirent64 * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent64 *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_send
++SC_TRACE_EVENT(sys_send,
++ TP_PROTO(int fd, void * buff, size_t len, unsigned int flags),
++ TP_ARGS(fd, buff, len, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, buff) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buff, buff) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_truncate64
++SC_TRACE_EVENT(sys_32_truncate64,
++ TP_PROTO(const char * path, unsigned long __dummy, unsigned long a2, unsigned long a3),
++ TP_ARGS(path, __dummy, a2, a3),
++ TP_STRUCT__entry(__string_from_user(path, path) __field(unsigned long, __dummy) __field(unsigned long, a2) __field(unsigned long, a3)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(__dummy, __dummy) tp_assign(a2, a2) tp_assign(a3, a3)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigaction
++SC_TRACE_EVENT(sys_rt_sigaction,
++ TP_PROTO(int sig, const struct sigaction * act, struct sigaction * oact, size_t sigsetsize),
++ TP_ARGS(sig, act, oact, sigsetsize),
++ TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction *, act) __field_hex(struct sigaction *, oact) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigprocmask
++SC_TRACE_EVENT(sys_rt_sigprocmask,
++ TP_PROTO(int how, sigset_t * nset, sigset_t * oset, size_t sigsetsize),
++ TP_ARGS(how, nset, oset, sigsetsize),
++ TP_STRUCT__entry(__field(int, how) __field_hex(sigset_t *, nset) __field_hex(sigset_t *, oset) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_wait4
++SC_TRACE_EVENT(sys_wait4,
++ TP_PROTO(pid_t upid, int * stat_addr, int options, struct rusage * ru),
++ TP_ARGS(upid, stat_addr, options, ru),
++ TP_STRUCT__entry(__field(pid_t, upid) __field_hex(int *, stat_addr) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(upid, upid) tp_assign(stat_addr, stat_addr) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_msgsnd
++SC_TRACE_EVENT(sys_msgsnd,
++ TP_PROTO(int msqid, struct msgbuf * msgp, size_t msgsz, int msgflg),
++ TP_ARGS(msqid, msgp, msgsz, msgflg),
++ TP_STRUCT__entry(__field(int, msqid) __field_hex(struct msgbuf *, msgp) __field(size_t, msgsz) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigtimedwait
++SC_TRACE_EVENT(sys_rt_sigtimedwait,
++ TP_PROTO(const sigset_t * uthese, siginfo_t * uinfo, const struct timespec * uts, size_t sigsetsize),
++ TP_ARGS(uthese, uinfo, uts, sigsetsize),
++ TP_STRUCT__entry(__field_hex(const sigset_t *, uthese) __field_hex(siginfo_t *, uinfo) __field_hex(const struct timespec *, uts) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(uthese, uthese) tp_assign(uinfo, uinfo) tp_assign(uts, uts) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_semtimedop
++SC_TRACE_EVENT(sys_semtimedop,
++ TP_PROTO(int semid, struct sembuf * tsops, unsigned nsops, const struct timespec * timeout),
++ TP_ARGS(semid, tsops, nsops, timeout),
++ TP_STRUCT__entry(__field(int, semid) __field_hex(struct sembuf *, tsops) __field(unsigned, nsops) __field_hex(const struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(semid, semid) tp_assign(tsops, tsops) tp_assign(nsops, nsops) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timer_settime
++SC_TRACE_EVENT(sys_timer_settime,
++ TP_PROTO(timer_t timer_id, int flags, const struct itimerspec * new_setting, struct itimerspec * old_setting),
++ TP_ARGS(timer_id, flags, new_setting, old_setting),
++ TP_STRUCT__entry(__field(timer_t, timer_id) __field(int, flags) __field_hex(const struct itimerspec *, new_setting) __field_hex(struct itimerspec *, old_setting)),
++ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(flags, flags) tp_assign(new_setting, new_setting) tp_assign(old_setting, old_setting)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_clock_nanosleep
++SC_TRACE_EVENT(sys_clock_nanosleep,
++ TP_PROTO(const clockid_t which_clock, int flags, const struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(which_clock, flags, rqtp, rmtp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field(int, flags) __field_hex(const struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(flags, flags) tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_vmsplice
++SC_TRACE_EVENT(sys_vmsplice,
++ TP_PROTO(int fd, const struct iovec * iov, unsigned long nr_segs, unsigned int flags),
++ TP_ARGS(fd, iov, nr_segs, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(const struct iovec *, iov) __field(unsigned long, nr_segs) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(iov, iov) tp_assign(nr_segs, nr_segs) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_utimensat
++SC_TRACE_EVENT(sys_utimensat,
++ TP_PROTO(int dfd, const char * filename, struct timespec * utimes, int flags),
++ TP_ARGS(dfd, filename, utimes, flags),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timespec *, utimes) __field(int, flags)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timerfd_settime
++SC_TRACE_EVENT(sys_timerfd_settime,
++ TP_PROTO(int ufd, int flags, const struct itimerspec * utmr, struct itimerspec * otmr),
++ TP_ARGS(ufd, flags, utmr, otmr),
++ TP_STRUCT__entry(__field(int, ufd) __field(int, flags) __field_hex(const struct itimerspec *, utmr) __field_hex(struct itimerspec *, otmr)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(flags, flags) tp_assign(utmr, utmr) tp_assign(otmr, otmr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_tgsigqueueinfo
++SC_TRACE_EVENT(sys_rt_tgsigqueueinfo,
++ TP_PROTO(pid_t tgid, pid_t pid, int sig, siginfo_t * uinfo),
++ TP_ARGS(tgid, pid, sig, uinfo),
++ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sendmmsg
++SC_TRACE_EVENT(sys_sendmmsg,
++ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags),
++ TP_ARGS(fd, mmsg, vlen, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_rt_sigaction
++SC_TRACE_EVENT(sys_32_rt_sigaction,
++ TP_PROTO(int sig, const struct sigaction32 * act, struct sigaction32 * oact, unsigned int sigsetsize),
++ TP_ARGS(sig, act, oact, sigsetsize),
++ TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction32 *, act) __field_hex(struct sigaction32 *, oact) __field(unsigned int, sigsetsize)),
++ TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_rt_sigprocmask
++SC_TRACE_EVENT(sys_32_rt_sigprocmask,
++ TP_PROTO(int how, compat_sigset_t * set, compat_sigset_t * oset, unsigned int sigsetsize),
++ TP_ARGS(how, set, oset, sigsetsize),
++ TP_STRUCT__entry(__field(int, how) __field_hex(compat_sigset_t *, set) __field_hex(compat_sigset_t *, oset) __field(unsigned int, sigsetsize)),
++ TP_fast_assign(tp_assign(how, how) tp_assign(set, set) tp_assign(oset, oset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_sendfile
++SC_TRACE_EVENT(sys_32_sendfile,
++ TP_PROTO(long out_fd, long in_fd, compat_off_t * offset, s32 count),
++ TP_ARGS(out_fd, in_fd, offset, count),
++ TP_STRUCT__entry(__field(long, out_fd) __field(long, in_fd) __field_hex(compat_off_t *, offset) __field(s32, count)),
++ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_socketpair
++SC_TRACE_EVENT(sys_socketpair,
++ TP_PROTO(int family, int type, int protocol, int * usockvec),
++ TP_ARGS(family, type, protocol, usockvec),
++ TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol) __field_hex(int *, usockvec)),
++ TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol) tp_assign(usockvec, usockvec)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_reboot
++SC_TRACE_EVENT(sys_reboot,
++ TP_PROTO(int magic1, int magic2, unsigned int cmd, void * arg),
++ TP_ARGS(magic1, magic2, cmd, arg),
++ TP_STRUCT__entry(__field(int, magic1) __field(int, magic2) __field(unsigned int, cmd) __field_hex(void *, arg)),
++ TP_fast_assign(tp_assign(magic1, magic1) tp_assign(magic2, magic2) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_quotactl
++SC_TRACE_EVENT(sys_quotactl,
++ TP_PROTO(unsigned int cmd, const char * special, qid_t id, void * addr),
++ TP_ARGS(cmd, special, id, addr),
++ TP_STRUCT__entry(__field(unsigned int, cmd) __field_hex(const char *, special) __field(qid_t, id) __field_hex(void *, addr)),
++ TP_fast_assign(tp_assign(cmd, cmd) tp_assign(special, special) tp_assign(id, id) tp_assign(addr, addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getxattr
++SC_TRACE_EVENT(sys_getxattr,
++ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
++ TP_ARGS(pathname, name, value, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lgetxattr
++SC_TRACE_EVENT(sys_lgetxattr,
++ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
++ TP_ARGS(pathname, name, value, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fgetxattr
++SC_TRACE_EVENT(sys_fgetxattr,
++ TP_PROTO(int fd, const char * name, void * value, size_t size),
++ TP_ARGS(fd, name, value, size),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_epoll_ctl
++SC_TRACE_EVENT(sys_epoll_ctl,
++ TP_PROTO(int epfd, int op, int fd, struct epoll_event * event),
++ TP_ARGS(epfd, op, fd, event),
++ TP_STRUCT__entry(__field(int, epfd) __field(int, op) __field(int, fd) __field_hex(struct epoll_event *, event)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(op, op) tp_assign(fd, fd) tp_assign(event, event)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_epoll_wait
++SC_TRACE_EVENT(sys_epoll_wait,
++ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout),
++ TP_ARGS(epfd, events, maxevents, timeout),
++ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sendfile64
++SC_TRACE_EVENT(sys_sendfile64,
++ TP_PROTO(int out_fd, int in_fd, loff_t * offset, size_t count),
++ TP_ARGS(out_fd, in_fd, offset, count),
++ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(loff_t *, offset) __field(size_t, count)),
++ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_openat
++SC_TRACE_EVENT(sys_openat,
++ TP_PROTO(int dfd, const char * filename, int flags, umode_t mode),
++ TP_ARGS(dfd, filename, flags, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, flags) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mknodat
++SC_TRACE_EVENT(sys_mknodat,
++ TP_PROTO(int dfd, const char * filename, umode_t mode, unsigned dev),
++ TP_ARGS(dfd, filename, mode, dev),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(umode_t, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_newfstatat
++SC_TRACE_EVENT(sys_newfstatat,
++ TP_PROTO(int dfd, const char * filename, struct stat * statbuf, int flag),
++ TP_ARGS(dfd, filename, statbuf, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct stat *, statbuf) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_renameat
++SC_TRACE_EVENT(sys_renameat,
++ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname),
++ TP_ARGS(olddfd, oldname, newdfd, newname),
++ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_readlinkat
++SC_TRACE_EVENT(sys_readlinkat,
++ TP_PROTO(int dfd, const char * pathname, char * buf, int bufsiz),
++ TP_ARGS(dfd, pathname, buf, bufsiz),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_signalfd4
++SC_TRACE_EVENT(sys_signalfd4,
++ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask, int flags),
++ TP_ARGS(ufd, user_mask, sizemask, flags),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask) __field(int, flags)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_accept4
++SC_TRACE_EVENT(sys_accept4,
++ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen, int flags),
++ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen) __field(int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_prlimit64
++SC_TRACE_EVENT(sys_prlimit64,
++ TP_PROTO(pid_t pid, unsigned int resource, const struct rlimit64 * new_rlim, struct rlimit64 * old_rlim),
++ TP_ARGS(pid, resource, new_rlim, old_rlim),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, resource) __field_hex(const struct rlimit64 *, new_rlim) __field_hex(struct rlimit64 *, old_rlim)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(resource, resource) tp_assign(new_rlim, new_rlim) tp_assign(old_rlim, old_rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_llseek
++SC_TRACE_EVENT(sys_32_llseek,
++ TP_PROTO(unsigned int fd, unsigned int offset_high, unsigned int offset_low, loff_t * result, unsigned int origin),
++ TP_ARGS(fd, offset_high, offset_low, result, origin),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, offset_high) __field(unsigned int, offset_low) __field_hex(loff_t *, result) __field(unsigned int, origin)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset_high, offset_high) tp_assign(offset_low, offset_low) tp_assign(result, result) tp_assign(origin, origin)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_waitid
++SC_TRACE_EVENT(sys_32_waitid,
++ TP_PROTO(int which, compat_pid_t pid, compat_siginfo_t * uinfo, int options, struct compat_rusage * uru),
++ TP_ARGS(which, pid, uinfo, options, uru),
++ TP_STRUCT__entry(__field(int, which) __field(compat_pid_t, pid) __field_hex(compat_siginfo_t *, uinfo) __field(int, options) __field_hex(struct compat_rusage *, uru)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(pid, pid) tp_assign(uinfo, uinfo) tp_assign(options, options) tp_assign(uru, uru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_select
++SC_TRACE_EVENT(sys_select,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timeval * tvp),
++ TP_ARGS(n, inp, outp, exp, tvp),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timeval *, tvp)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tvp, tvp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setsockopt
++SC_TRACE_EVENT(sys_setsockopt,
++ TP_PROTO(int fd, int level, int optname, char * optval, int optlen),
++ TP_ARGS(fd, level, optname, optval, optlen),
++ TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field(int, optlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_msgrcv
++SC_TRACE_EVENT(sys_msgrcv,
++ TP_PROTO(int msqid, struct msgbuf * msgp, size_t msgsz, long msgtyp, int msgflg),
++ TP_ARGS(msqid, msgp, msgsz, msgtyp, msgflg),
++ TP_STRUCT__entry(__field(int, msqid) __field_hex(struct msgbuf *, msgp) __field(size_t, msgsz) __field(long, msgtyp) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgtyp, msgtyp) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mount
++SC_TRACE_EVENT(sys_mount,
++ TP_PROTO(char * dev_name, char * dir_name, char * type, unsigned long flags, void * data),
++ TP_ARGS(dev_name, dir_name, type, flags, data),
++ TP_STRUCT__entry(__string_from_user(dev_name, dev_name) __string_from_user(dir_name, dir_name) __string_from_user(type, type) __field(unsigned long, flags) __field_hex(void *, data)),
++ TP_fast_assign(tp_copy_string_from_user(dev_name, dev_name) tp_copy_string_from_user(dir_name, dir_name) tp_copy_string_from_user(type, type) tp_assign(flags, flags) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_io_getevents
++SC_TRACE_EVENT(sys_io_getevents,
++ TP_PROTO(aio_context_t ctx_id, long min_nr, long nr, struct io_event * events, struct timespec * timeout),
++ TP_ARGS(ctx_id, min_nr, nr, events, timeout),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, min_nr) __field(long, nr) __field_hex(struct io_event *, events) __field_hex(struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(min_nr, min_nr) tp_assign(nr, nr) tp_assign(events, events) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_waitid
++SC_TRACE_EVENT(sys_waitid,
++ TP_PROTO(int which, pid_t upid, struct siginfo * infop, int options, struct rusage * ru),
++ TP_ARGS(which, upid, infop, options, ru),
++ TP_STRUCT__entry(__field(int, which) __field(pid_t, upid) __field_hex(struct siginfo *, infop) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(upid, upid) tp_assign(infop, infop) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ppoll
++SC_TRACE_EVENT(sys_ppoll,
++ TP_PROTO(struct pollfd * ufds, unsigned int nfds, struct timespec * tsp, const sigset_t * sigmask, size_t sigsetsize),
++ TP_ARGS(ufds, nfds, tsp, sigmask, sigsetsize),
++ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field_hex(struct timespec *, tsp) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(tsp, tsp) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_recvmmsg
++SC_TRACE_EVENT(sys_recvmmsg,
++ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags, struct timespec * timeout),
++ TP_ARGS(fd, mmsg, vlen, flags, timeout),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags) __field_hex(struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getsockopt
++SC_TRACE_EVENT(sys_getsockopt,
++ TP_PROTO(int fd, int level, int optname, char * optval, int * optlen),
++ TP_ARGS(fd, level, optname, optval, optlen),
++ TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field_hex(int *, optlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setxattr
++SC_TRACE_EVENT(sys_setxattr,
++ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(pathname, name, value, size, flags),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lsetxattr
++SC_TRACE_EVENT(sys_lsetxattr,
++ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(pathname, name, value, size, flags),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fsetxattr
++SC_TRACE_EVENT(sys_fsetxattr,
++ TP_PROTO(int fd, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(fd, name, value, size, flags),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fchownat
++SC_TRACE_EVENT(sys_fchownat,
++ TP_PROTO(int dfd, const char * filename, uid_t user, gid_t group, int flag),
++ TP_ARGS(dfd, filename, user, group, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_linkat
++SC_TRACE_EVENT(sys_linkat,
++ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname, int flags),
++ TP_ARGS(olddfd, oldname, newdfd, newname, flags),
++ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname) __field(int, flags)),
++ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_preadv
++SC_TRACE_EVENT(sys_preadv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
++ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_pwritev
++SC_TRACE_EVENT(sys_pwritev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
++ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_pread
++SC_TRACE_EVENT(sys_32_pread,
++ TP_PROTO(unsigned long fd, char * buf, size_t count, unsigned long unused, unsigned long a4, unsigned long a5),
++ TP_ARGS(fd, buf, count, unused, a4, a5),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(char *, buf) __field(size_t, count) __field(unsigned long, unused) __field(unsigned long, a4) __field(unsigned long, a5)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count) tp_assign(unused, unused) tp_assign(a4, a4) tp_assign(a5, a5)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_pwrite
++SC_TRACE_EVENT(sys_32_pwrite,
++ TP_PROTO(unsigned int fd, const char * buf, size_t count, u32 unused, u64 a4, u64 a5),
++ TP_ARGS(fd, buf, count, unused, a4, a5),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count) __field(u32, unused) __field(u64, a4) __field(u64, a5)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count) tp_assign(unused, unused) tp_assign(a4, a4) tp_assign(a5, a5)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_fanotify_mark
++SC_TRACE_EVENT(sys_32_fanotify_mark,
++ TP_PROTO(int fanotify_fd, unsigned int flags, u64 a3, u64 a4, int dfd, const char * pathname),
++ TP_ARGS(fanotify_fd, flags, a3, a4, dfd, pathname),
++ TP_STRUCT__entry(__field(int, fanotify_fd) __field(unsigned int, flags) __field(u64, a3) __field(u64, a4) __field(int, dfd) __string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_assign(fanotify_fd, fanotify_fd) tp_assign(flags, flags) tp_assign(a3, a3) tp_assign(a4, a4) tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_recvfrom
++SC_TRACE_EVENT(sys_recvfrom,
++ TP_PROTO(int fd, void * ubuf, size_t size, unsigned int flags, struct sockaddr * addr, int * addr_len),
++ TP_ARGS(fd, ubuf, size, flags, addr, addr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, ubuf) __field(size_t, size) __field(unsigned int, flags) __field_hex(struct sockaddr *, addr) __field_hex(int *, addr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(ubuf, ubuf) tp_assign(size, size) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_futex
++SC_TRACE_EVENT(sys_futex,
++ TP_PROTO(u32 * uaddr, int op, u32 val, struct timespec * utime, u32 * uaddr2, u32 val3),
++ TP_ARGS(uaddr, op, val, utime, uaddr2, val3),
++ TP_STRUCT__entry(__field_hex(u32 *, uaddr) __field(int, op) __field(u32, val) __field_hex(struct timespec *, utime) __field_hex(u32 *, uaddr2) __field(u32, val3)),
++ TP_fast_assign(tp_assign(uaddr, uaddr) tp_assign(op, op) tp_assign(val, val) tp_assign(utime, utime) tp_assign(uaddr2, uaddr2) tp_assign(val3, val3)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_pselect6
++SC_TRACE_EVENT(sys_pselect6,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timespec * tsp, void * sig),
++ TP_ARGS(n, inp, outp, exp, tsp, sig),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timespec *, tsp) __field_hex(void *, sig)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tsp, tsp) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_epoll_pwait
++SC_TRACE_EVENT(sys_epoll_pwait,
++ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout, const sigset_t * sigmask, size_t sigsetsize),
++ TP_ARGS(epfd, events, maxevents, timeout, sigmask, sigsetsize),
++ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_process_vm_readv
++SC_TRACE_EVENT(sys_process_vm_readv,
++ TP_PROTO(pid_t pid, const struct iovec * lvec, unsigned long liovcnt, const struct iovec * rvec, unsigned long riovcnt, unsigned long flags),
++ TP_ARGS(pid, lvec, liovcnt, rvec, riovcnt, flags),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(const struct iovec *, lvec) __field(unsigned long, liovcnt) __field_hex(const struct iovec *, rvec) __field(unsigned long, riovcnt) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(lvec, lvec) tp_assign(liovcnt, liovcnt) tp_assign(rvec, rvec) tp_assign(riovcnt, riovcnt) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_process_vm_writev
++SC_TRACE_EVENT(sys_process_vm_writev,
++ TP_PROTO(pid_t pid, const struct iovec * lvec, unsigned long liovcnt, const struct iovec * rvec, unsigned long riovcnt, unsigned long flags),
++ TP_ARGS(pid, lvec, liovcnt, rvec, riovcnt, flags),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(const struct iovec *, lvec) __field(unsigned long, liovcnt) __field_hex(const struct iovec *, rvec) __field(unsigned long, riovcnt) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(lvec, lvec) tp_assign(liovcnt, liovcnt) tp_assign(rvec, rvec) tp_assign(riovcnt, riovcnt) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sendto
++SC_TRACE_EVENT(sys_sendto,
++ TP_PROTO(int fd, void * buff, size_t len, unsigned int flags, struct sockaddr * addr, int addr_len),
++ TP_ARGS(fd, buff, len, flags, addr, addr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, buff) __field(size_t, len) __field(unsigned int, flags) __field_hex(struct sockaddr *, addr) __field_hex(int, addr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buff, buff) tp_assign(len, len) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_32_futex
++SC_TRACE_EVENT(sys_32_futex,
++ TP_PROTO(u32 * uaddr, int op, u32 val, struct compat_timespec * utime, u32 * uaddr2, u32 val3),
++ TP_ARGS(uaddr, op, val, utime, uaddr2, val3),
++ TP_STRUCT__entry(__field_hex(u32 *, uaddr) __field(int, op) __field(u32, val) __field_hex(struct compat_timespec *, utime) __field_hex(u32 *, uaddr2) __field(u32, val3)),
++ TP_fast_assign(tp_assign(uaddr, uaddr) tp_assign(op, op) tp_assign(val, val) tp_assign(utime, utime) tp_assign(uaddr2, uaddr2) tp_assign(val3, val3)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_splice
++SC_TRACE_EVENT(sys_splice,
++ TP_PROTO(int fd_in, loff_t * off_in, int fd_out, loff_t * off_out, size_t len, unsigned int flags),
++ TP_ARGS(fd_in, off_in, fd_out, off_out, len, flags),
++ TP_STRUCT__entry(__field(int, fd_in) __field_hex(loff_t *, off_in) __field(int, fd_out) __field_hex(loff_t *, off_out) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd_in, fd_in) tp_assign(off_in, off_in) tp_assign(fd_out, fd_out) tp_assign(off_out, off_out) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_POINTERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "mips-64-syscalls-3.5.0_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_TABLE_64_sys_waitpid
++TRACE_SYSCALL_TABLE(sys_waitpid, sys_waitpid, 4007, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_oldumount
++TRACE_SYSCALL_TABLE(sys_oldumount, sys_oldumount, 4022, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_olduname
++TRACE_SYSCALL_TABLE(sys_olduname, sys_olduname, 4059, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_sigaction
++TRACE_SYSCALL_TABLE(sys_32_sigaction, sys_32_sigaction, 4067, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_uselib
++TRACE_SYSCALL_TABLE(sys_uselib, sys_uselib, 4086, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_uname
++TRACE_SYSCALL_TABLE(sys_uname, sys_uname, 4109, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_llseek
++TRACE_SYSCALL_TABLE(sys_32_llseek, sys_32_llseek, 4140, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_send
++TRACE_SYSCALL_TABLE(sys_send, sys_send, 4178, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_pread
++TRACE_SYSCALL_TABLE(sys_32_pread, sys_32_pread, 4200, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_pwrite
++TRACE_SYSCALL_TABLE(sys_32_pwrite, sys_32_pwrite, 4201, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_truncate64
++TRACE_SYSCALL_TABLE(sys_32_truncate64, sys_32_truncate64, 4211, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_waitid
++TRACE_SYSCALL_TABLE(sys_32_waitid, sys_32_waitid, 4278, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_fanotify_mark
++TRACE_SYSCALL_TABLE(sys_32_fanotify_mark, sys_32_fanotify_mark, 4337, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigaction
++TRACE_SYSCALL_TABLE(sys_rt_sigaction, sys_rt_sigaction, 5013, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigprocmask
++TRACE_SYSCALL_TABLE(sys_rt_sigprocmask, sys_rt_sigprocmask, 5014, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_readv
++TRACE_SYSCALL_TABLE(sys_readv, sys_readv, 5018, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_writev
++TRACE_SYSCALL_TABLE(sys_writev, sys_writev, 5019, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_select
++TRACE_SYSCALL_TABLE(sys_select, sys_select, 5022, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_shmctl
++TRACE_SYSCALL_TABLE(sys_shmctl, sys_shmctl, 5030, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_nanosleep
++TRACE_SYSCALL_TABLE(sys_nanosleep, sys_nanosleep, 5034, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getitimer
++TRACE_SYSCALL_TABLE(sys_getitimer, sys_getitimer, 5035, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setitimer
++TRACE_SYSCALL_TABLE(sys_setitimer, sys_setitimer, 5036, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_recvfrom
++TRACE_SYSCALL_TABLE(sys_recvfrom, sys_recvfrom, 5044, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sendmsg
++TRACE_SYSCALL_TABLE(sys_sendmsg, sys_sendmsg, 5045, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_recvmsg
++TRACE_SYSCALL_TABLE(sys_recvmsg, sys_recvmsg, 5046, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setsockopt
++TRACE_SYSCALL_TABLE(sys_setsockopt, sys_setsockopt, 5053, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_wait4
++TRACE_SYSCALL_TABLE(sys_wait4, sys_wait4, 5059, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_msgsnd
++TRACE_SYSCALL_TABLE(sys_msgsnd, sys_msgsnd, 5067, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_msgrcv
++TRACE_SYSCALL_TABLE(sys_msgrcv, sys_msgrcv, 5068, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_msgctl
++TRACE_SYSCALL_TABLE(sys_msgctl, sys_msgctl, 5069, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getdents
++TRACE_SYSCALL_TABLE(sys_getdents, sys_getdents, 5076, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_gettimeofday
++TRACE_SYSCALL_TABLE(sys_gettimeofday, sys_gettimeofday, 5094, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getrlimit
++TRACE_SYSCALL_TABLE(sys_getrlimit, sys_getrlimit, 5095, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getrusage
++TRACE_SYSCALL_TABLE(sys_getrusage, sys_getrusage, 5096, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sysinfo
++TRACE_SYSCALL_TABLE(sys_sysinfo, sys_sysinfo, 5097, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_times
++TRACE_SYSCALL_TABLE(sys_times, sys_times, 5098, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigpending
++TRACE_SYSCALL_TABLE(sys_rt_sigpending, sys_rt_sigpending, 5125, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigtimedwait
++TRACE_SYSCALL_TABLE(sys_rt_sigtimedwait, sys_rt_sigtimedwait, 5126, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigqueueinfo
++TRACE_SYSCALL_TABLE(sys_rt_sigqueueinfo, sys_rt_sigqueueinfo, 5127, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_utime
++TRACE_SYSCALL_TABLE(sys_utime, sys_utime, 5130, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ustat
++TRACE_SYSCALL_TABLE(sys_ustat, sys_ustat, 5133, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_statfs
++TRACE_SYSCALL_TABLE(sys_statfs, sys_statfs, 5134, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fstatfs
++TRACE_SYSCALL_TABLE(sys_fstatfs, sys_fstatfs, 5135, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_rr_get_interval
++TRACE_SYSCALL_TABLE(sys_sched_rr_get_interval, sys_sched_rr_get_interval, 5145, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sysctl
++TRACE_SYSCALL_TABLE(sys_sysctl, sys_sysctl, 5152, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_adjtimex
++TRACE_SYSCALL_TABLE(sys_adjtimex, sys_adjtimex, 5154, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setrlimit
++TRACE_SYSCALL_TABLE(sys_setrlimit, sys_setrlimit, 5155, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_settimeofday
++TRACE_SYSCALL_TABLE(sys_settimeofday, sys_settimeofday, 5159, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mount
++TRACE_SYSCALL_TABLE(sys_mount, sys_mount, 5160, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_futex
++TRACE_SYSCALL_TABLE(sys_futex, sys_futex, 5194, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_setaffinity
++TRACE_SYSCALL_TABLE(sys_sched_setaffinity, sys_sched_setaffinity, 5195, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_getaffinity
++TRACE_SYSCALL_TABLE(sys_sched_getaffinity, sys_sched_getaffinity, 5196, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_io_setup
++TRACE_SYSCALL_TABLE(sys_io_setup, sys_io_setup, 5200, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_io_getevents
++TRACE_SYSCALL_TABLE(sys_io_getevents, sys_io_getevents, 5202, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_io_submit
++TRACE_SYSCALL_TABLE(sys_io_submit, sys_io_submit, 5203, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_semtimedop
++TRACE_SYSCALL_TABLE(sys_semtimedop, sys_semtimedop, 5214, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timer_create
++TRACE_SYSCALL_TABLE(sys_timer_create, sys_timer_create, 5216, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timer_settime
++TRACE_SYSCALL_TABLE(sys_timer_settime, sys_timer_settime, 5217, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timer_gettime
++TRACE_SYSCALL_TABLE(sys_timer_gettime, sys_timer_gettime, 5218, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_clock_settime
++TRACE_SYSCALL_TABLE(sys_clock_settime, sys_clock_settime, 5221, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_clock_gettime
++TRACE_SYSCALL_TABLE(sys_clock_gettime, sys_clock_gettime, 5222, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_clock_getres
++TRACE_SYSCALL_TABLE(sys_clock_getres, sys_clock_getres, 5223, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_clock_nanosleep
++TRACE_SYSCALL_TABLE(sys_clock_nanosleep, sys_clock_nanosleep, 5224, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_utimes
++TRACE_SYSCALL_TABLE(sys_utimes, sys_utimes, 5226, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_waitid
++TRACE_SYSCALL_TABLE(sys_waitid, sys_waitid, 5237, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_futimesat
++TRACE_SYSCALL_TABLE(sys_futimesat, sys_futimesat, 5251, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pselect6
++TRACE_SYSCALL_TABLE(sys_pselect6, sys_pselect6, 5260, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ppoll
++TRACE_SYSCALL_TABLE(sys_ppoll, sys_ppoll, 5261, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_vmsplice
++TRACE_SYSCALL_TABLE(sys_vmsplice, sys_vmsplice, 5266, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_set_robust_list
++TRACE_SYSCALL_TABLE(sys_set_robust_list, sys_set_robust_list, 5268, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_get_robust_list
++TRACE_SYSCALL_TABLE(sys_get_robust_list, sys_get_robust_list, 5269, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_epoll_pwait
++TRACE_SYSCALL_TABLE(sys_epoll_pwait, sys_epoll_pwait, 5272, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_utimensat
++TRACE_SYSCALL_TABLE(sys_utimensat, sys_utimensat, 5275, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_signalfd
++TRACE_SYSCALL_TABLE(sys_signalfd, sys_signalfd, 5276, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timerfd_gettime
++TRACE_SYSCALL_TABLE(sys_timerfd_gettime, sys_timerfd_gettime, 5281, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timerfd_settime
++TRACE_SYSCALL_TABLE(sys_timerfd_settime, sys_timerfd_settime, 5282, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_tgsigqueueinfo
++TRACE_SYSCALL_TABLE(sys_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, 5291, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_recvmmsg
++TRACE_SYSCALL_TABLE(sys_recvmmsg, sys_recvmmsg, 5294, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_clock_adjtime
++TRACE_SYSCALL_TABLE(sys_clock_adjtime, sys_clock_adjtime, 5300, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sendmmsg
++TRACE_SYSCALL_TABLE(sys_sendmmsg, sys_sendmmsg, 5302, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_process_vm_readv
++TRACE_SYSCALL_TABLE(sys_process_vm_readv, sys_process_vm_readv, 5304, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_process_vm_writev
++TRACE_SYSCALL_TABLE(sys_process_vm_writev, sys_process_vm_writev, 5305, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_read
++TRACE_SYSCALL_TABLE(sys_read, sys_read, 6000, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_write
++TRACE_SYSCALL_TABLE(sys_write, sys_write, 6001, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_open
++TRACE_SYSCALL_TABLE(sys_open, sys_open, 6002, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_newstat
++TRACE_SYSCALL_TABLE(sys_newstat, sys_newstat, 6004, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_newfstat
++TRACE_SYSCALL_TABLE(sys_newfstat, sys_newfstat, 6005, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_newlstat
++TRACE_SYSCALL_TABLE(sys_newlstat, sys_newlstat, 6006, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_poll
++TRACE_SYSCALL_TABLE(sys_poll, sys_poll, 6007, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_rt_sigaction
++TRACE_SYSCALL_TABLE(sys_32_rt_sigaction, sys_32_rt_sigaction, 6013, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_rt_sigprocmask
++TRACE_SYSCALL_TABLE(sys_32_rt_sigprocmask, sys_32_rt_sigprocmask, 6014, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_access
++TRACE_SYSCALL_TABLE(sys_access, sys_access, 6020, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mincore
++TRACE_SYSCALL_TABLE(sys_mincore, sys_mincore, 6026, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_shmat
++TRACE_SYSCALL_TABLE(sys_shmat, sys_shmat, 6029, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_sendfile
++TRACE_SYSCALL_TABLE(sys_32_sendfile, sys_32_sendfile, 6039, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_connect
++TRACE_SYSCALL_TABLE(sys_connect, sys_connect, 6041, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_accept
++TRACE_SYSCALL_TABLE(sys_accept, sys_accept, 6042, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sendto
++TRACE_SYSCALL_TABLE(sys_sendto, sys_sendto, 6043, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_bind
++TRACE_SYSCALL_TABLE(sys_bind, sys_bind, 6048, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getsockname
++TRACE_SYSCALL_TABLE(sys_getsockname, sys_getsockname, 6050, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getpeername
++TRACE_SYSCALL_TABLE(sys_getpeername, sys_getpeername, 6051, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_socketpair
++TRACE_SYSCALL_TABLE(sys_socketpair, sys_socketpair, 6052, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getsockopt
++TRACE_SYSCALL_TABLE(sys_getsockopt, sys_getsockopt, 6054, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_newuname
++TRACE_SYSCALL_TABLE(sys_newuname, sys_newuname, 6061, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_semop
++TRACE_SYSCALL_TABLE(sys_semop, sys_semop, 6063, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_shmdt
++TRACE_SYSCALL_TABLE(sys_shmdt, sys_shmdt, 6065, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_truncate
++TRACE_SYSCALL_TABLE(sys_truncate, sys_truncate, 6074, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getcwd
++TRACE_SYSCALL_TABLE(sys_getcwd, sys_getcwd, 6077, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_chdir
++TRACE_SYSCALL_TABLE(sys_chdir, sys_chdir, 6078, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rename
++TRACE_SYSCALL_TABLE(sys_rename, sys_rename, 6080, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mkdir
++TRACE_SYSCALL_TABLE(sys_mkdir, sys_mkdir, 6081, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rmdir
++TRACE_SYSCALL_TABLE(sys_rmdir, sys_rmdir, 6082, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_creat
++TRACE_SYSCALL_TABLE(sys_creat, sys_creat, 6083, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_link
++TRACE_SYSCALL_TABLE(sys_link, sys_link, 6084, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_unlink
++TRACE_SYSCALL_TABLE(sys_unlink, sys_unlink, 6085, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_symlink
++TRACE_SYSCALL_TABLE(sys_symlink, sys_symlink, 6086, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_readlink
++TRACE_SYSCALL_TABLE(sys_readlink, sys_readlink, 6087, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_chmod
++TRACE_SYSCALL_TABLE(sys_chmod, sys_chmod, 6088, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_chown
++TRACE_SYSCALL_TABLE(sys_chown, sys_chown, 6090, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lchown
++TRACE_SYSCALL_TABLE(sys_lchown, sys_lchown, 6092, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_syslog
++TRACE_SYSCALL_TABLE(sys_syslog, sys_syslog, 6101, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getgroups
++TRACE_SYSCALL_TABLE(sys_getgroups, sys_getgroups, 6113, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setgroups
++TRACE_SYSCALL_TABLE(sys_setgroups, sys_setgroups, 6114, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getresuid
++TRACE_SYSCALL_TABLE(sys_getresuid, sys_getresuid, 6116, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getresgid
++TRACE_SYSCALL_TABLE(sys_getresgid, sys_getresgid, 6118, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_rt_sigpending
++TRACE_SYSCALL_TABLE(sys_32_rt_sigpending, sys_32_rt_sigpending, 6125, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_rt_sigqueueinfo
++TRACE_SYSCALL_TABLE(sys_32_rt_sigqueueinfo, sys_32_rt_sigqueueinfo, 6127, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mknod
++TRACE_SYSCALL_TABLE(sys_mknod, sys_mknod, 6131, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_setparam
++TRACE_SYSCALL_TABLE(sys_sched_setparam, sys_sched_setparam, 6139, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_getparam
++TRACE_SYSCALL_TABLE(sys_sched_getparam, sys_sched_getparam, 6140, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_setscheduler
++TRACE_SYSCALL_TABLE(sys_sched_setscheduler, sys_sched_setscheduler, 6141, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_sched_rr_get_interval
++TRACE_SYSCALL_TABLE(sys_32_sched_rr_get_interval, sys_32_sched_rr_get_interval, 6145, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pivot_root
++TRACE_SYSCALL_TABLE(sys_pivot_root, sys_pivot_root, 6151, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_chroot
++TRACE_SYSCALL_TABLE(sys_chroot, sys_chroot, 6156, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_umount
++TRACE_SYSCALL_TABLE(sys_umount, sys_umount, 6161, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_swapon
++TRACE_SYSCALL_TABLE(sys_swapon, sys_swapon, 6162, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_swapoff
++TRACE_SYSCALL_TABLE(sys_swapoff, sys_swapoff, 6163, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_reboot
++TRACE_SYSCALL_TABLE(sys_reboot, sys_reboot, 6164, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sethostname
++TRACE_SYSCALL_TABLE(sys_sethostname, sys_sethostname, 6165, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setdomainname
++TRACE_SYSCALL_TABLE(sys_setdomainname, sys_setdomainname, 6166, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_init_module
++TRACE_SYSCALL_TABLE(sys_init_module, sys_init_module, 6168, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_delete_module
++TRACE_SYSCALL_TABLE(sys_delete_module, sys_delete_module, 6169, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_quotactl
++TRACE_SYSCALL_TABLE(sys_quotactl, sys_quotactl, 6172, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setxattr
++TRACE_SYSCALL_TABLE(sys_setxattr, sys_setxattr, 6180, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lsetxattr
++TRACE_SYSCALL_TABLE(sys_lsetxattr, sys_lsetxattr, 6181, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fsetxattr
++TRACE_SYSCALL_TABLE(sys_fsetxattr, sys_fsetxattr, 6182, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getxattr
++TRACE_SYSCALL_TABLE(sys_getxattr, sys_getxattr, 6183, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lgetxattr
++TRACE_SYSCALL_TABLE(sys_lgetxattr, sys_lgetxattr, 6184, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fgetxattr
++TRACE_SYSCALL_TABLE(sys_fgetxattr, sys_fgetxattr, 6185, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_listxattr
++TRACE_SYSCALL_TABLE(sys_listxattr, sys_listxattr, 6186, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_llistxattr
++TRACE_SYSCALL_TABLE(sys_llistxattr, sys_llistxattr, 6187, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_flistxattr
++TRACE_SYSCALL_TABLE(sys_flistxattr, sys_flistxattr, 6188, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_removexattr
++TRACE_SYSCALL_TABLE(sys_removexattr, sys_removexattr, 6189, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lremovexattr
++TRACE_SYSCALL_TABLE(sys_lremovexattr, sys_lremovexattr, 6190, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fremovexattr
++TRACE_SYSCALL_TABLE(sys_fremovexattr, sys_fremovexattr, 6191, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_32_futex
++TRACE_SYSCALL_TABLE(sys_32_futex, sys_32_futex, 6194, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_cachectl
++TRACE_SYSCALL_TABLE(sys_cachectl, sys_cachectl, 6198, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_io_cancel
++TRACE_SYSCALL_TABLE(sys_io_cancel, sys_io_cancel, 6204, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_epoll_ctl
++TRACE_SYSCALL_TABLE(sys_epoll_ctl, sys_epoll_ctl, 6208, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_epoll_wait
++TRACE_SYSCALL_TABLE(sys_epoll_wait, sys_epoll_wait, 6209, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_set_tid_address
++TRACE_SYSCALL_TABLE(sys_set_tid_address, sys_set_tid_address, 6213, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sendfile64
++TRACE_SYSCALL_TABLE(sys_sendfile64, sys_sendfile64, 6219, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_inotify_add_watch
++TRACE_SYSCALL_TABLE(sys_inotify_add_watch, sys_inotify_add_watch, 6248, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_openat
++TRACE_SYSCALL_TABLE(sys_openat, sys_openat, 6251, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mkdirat
++TRACE_SYSCALL_TABLE(sys_mkdirat, sys_mkdirat, 6252, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mknodat
++TRACE_SYSCALL_TABLE(sys_mknodat, sys_mknodat, 6253, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fchownat
++TRACE_SYSCALL_TABLE(sys_fchownat, sys_fchownat, 6254, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_newfstatat
++TRACE_SYSCALL_TABLE(sys_newfstatat, sys_newfstatat, 6256, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_unlinkat
++TRACE_SYSCALL_TABLE(sys_unlinkat, sys_unlinkat, 6257, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_renameat
++TRACE_SYSCALL_TABLE(sys_renameat, sys_renameat, 6258, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_linkat
++TRACE_SYSCALL_TABLE(sys_linkat, sys_linkat, 6259, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_symlinkat
++TRACE_SYSCALL_TABLE(sys_symlinkat, sys_symlinkat, 6260, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_readlinkat
++TRACE_SYSCALL_TABLE(sys_readlinkat, sys_readlinkat, 6261, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fchmodat
++TRACE_SYSCALL_TABLE(sys_fchmodat, sys_fchmodat, 6262, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_faccessat
++TRACE_SYSCALL_TABLE(sys_faccessat, sys_faccessat, 6263, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_splice
++TRACE_SYSCALL_TABLE(sys_splice, sys_splice, 6267, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getcpu
++TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 6275, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_signalfd4
++TRACE_SYSCALL_TABLE(sys_signalfd4, sys_signalfd4, 6287, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pipe2
++TRACE_SYSCALL_TABLE(sys_pipe2, sys_pipe2, 6291, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_preadv
++TRACE_SYSCALL_TABLE(sys_preadv, sys_preadv, 6293, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pwritev
++TRACE_SYSCALL_TABLE(sys_pwritev, sys_pwritev, 6294, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_accept4
++TRACE_SYSCALL_TABLE(sys_accept4, sys_accept4, 6297, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getdents64
++TRACE_SYSCALL_TABLE(sys_getdents64, sys_getdents64, 6299, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_prlimit64
++TRACE_SYSCALL_TABLE(sys_prlimit64, sys_prlimit64, 6302, 4)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/mips-64-syscalls-3.5.0_pointers_override.h
+@@ -0,0 +1,8 @@
++#ifndef CREATE_SYSCALL_TABLE
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#define OVERRIDE_TABLE_64_sys_clone
++TRACE_SYSCALL_TABLE(sys_clone, sys_clone, 5055, 0)
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/powerpc-32-syscalls-3.0.34_integers.h
+@@ -0,0 +1,1043 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_INTEGERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_INTEGERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "powerpc-32-syscalls-3.0.34_integers_override.h"
++#include "syscalls_integers_override.h"
++
++SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,
++ TP_STRUCT__entry(),
++ TP_fast_assign(),
++ TP_printk()
++)
++#ifndef OVERRIDE_32_sys_restart_syscall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_restart_syscall)
++#endif
++#ifndef OVERRIDE_32_sys_getpid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpid)
++#endif
++#ifndef OVERRIDE_32_sys_getuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid)
++#endif
++#ifndef OVERRIDE_32_sys_pause
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_pause)
++#endif
++#ifndef OVERRIDE_32_sys_sync
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sync)
++#endif
++#ifndef OVERRIDE_32_sys_getgid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid)
++#endif
++#ifndef OVERRIDE_32_sys_geteuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid)
++#endif
++#ifndef OVERRIDE_32_sys_getegid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid)
++#endif
++#ifndef OVERRIDE_32_sys_getppid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getppid)
++#endif
++#ifndef OVERRIDE_32_sys_getpgrp
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpgrp)
++#endif
++#ifndef OVERRIDE_32_sys_setsid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_setsid)
++#endif
++#ifndef OVERRIDE_32_sys_sgetmask
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sgetmask)
++#endif
++#ifndef OVERRIDE_32_sys_vhangup
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_vhangup)
++#endif
++#ifndef OVERRIDE_32_sys_munlockall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_munlockall)
++#endif
++#ifndef OVERRIDE_32_sys_sched_yield
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sched_yield)
++#endif
++#ifndef OVERRIDE_32_sys_gettid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_gettid)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_init
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_inotify_init)
++#endif
++#ifndef OVERRIDE_32_sys_exit
++SC_TRACE_EVENT(sys_exit,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_close
++SC_TRACE_EVENT(sys_close,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setuid
++SC_TRACE_EVENT(sys_setuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_alarm
++SC_TRACE_EVENT(sys_alarm,
++ TP_PROTO(unsigned int seconds),
++ TP_ARGS(seconds),
++ TP_STRUCT__entry(__field(unsigned int, seconds)),
++ TP_fast_assign(tp_assign(seconds, seconds)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_nice
++SC_TRACE_EVENT(sys_nice,
++ TP_PROTO(int increment),
++ TP_ARGS(increment),
++ TP_STRUCT__entry(__field(int, increment)),
++ TP_fast_assign(tp_assign(increment, increment)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup
++SC_TRACE_EVENT(sys_dup,
++ TP_PROTO(unsigned int fildes),
++ TP_ARGS(fildes),
++ TP_STRUCT__entry(__field(unsigned int, fildes)),
++ TP_fast_assign(tp_assign(fildes, fildes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_brk
++SC_TRACE_EVENT(sys_brk,
++ TP_PROTO(unsigned long brk),
++ TP_ARGS(brk),
++ TP_STRUCT__entry(__field(unsigned long, brk)),
++ TP_fast_assign(tp_assign(brk, brk)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgid
++SC_TRACE_EVENT(sys_setgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_umask
++SC_TRACE_EVENT(sys_umask,
++ TP_PROTO(int mask),
++ TP_ARGS(mask),
++ TP_STRUCT__entry(__field(int, mask)),
++ TP_fast_assign(tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ssetmask
++SC_TRACE_EVENT(sys_ssetmask,
++ TP_PROTO(int newmask),
++ TP_ARGS(newmask),
++ TP_STRUCT__entry(__field(int, newmask)),
++ TP_fast_assign(tp_assign(newmask, newmask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fsync
++SC_TRACE_EVENT(sys_fsync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpgid
++SC_TRACE_EVENT(sys_getpgid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchdir
++SC_TRACE_EVENT(sys_fchdir,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_personality
++SC_TRACE_EVENT(sys_personality,
++ TP_PROTO(unsigned int personality),
++ TP_ARGS(personality),
++ TP_STRUCT__entry(__field(unsigned int, personality)),
++ TP_fast_assign(tp_assign(personality, personality)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsuid
++SC_TRACE_EVENT(sys_setfsuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsgid
++SC_TRACE_EVENT(sys_setfsgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getsid
++SC_TRACE_EVENT(sys_getsid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fdatasync
++SC_TRACE_EVENT(sys_fdatasync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mlockall
++SC_TRACE_EVENT(sys_mlockall,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getscheduler
++SC_TRACE_EVENT(sys_sched_getscheduler,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_get_priority_max
++SC_TRACE_EVENT(sys_sched_get_priority_max,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_get_priority_min
++SC_TRACE_EVENT(sys_sched_get_priority_min,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_destroy
++SC_TRACE_EVENT(sys_io_destroy,
++ TP_PROTO(aio_context_t ctx),
++ TP_ARGS(ctx),
++ TP_STRUCT__entry(__field(aio_context_t, ctx)),
++ TP_fast_assign(tp_assign(ctx, ctx)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_exit_group
++SC_TRACE_EVENT(sys_exit_group,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_create
++SC_TRACE_EVENT(sys_epoll_create,
++ TP_PROTO(int size),
++ TP_ARGS(size),
++ TP_STRUCT__entry(__field(int, size)),
++ TP_fast_assign(tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_getoverrun
++SC_TRACE_EVENT(sys_timer_getoverrun,
++ TP_PROTO(timer_t timer_id),
++ TP_ARGS(timer_id),
++ TP_STRUCT__entry(__field(timer_t, timer_id)),
++ TP_fast_assign(tp_assign(timer_id, timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_delete
++SC_TRACE_EVENT(sys_timer_delete,
++ TP_PROTO(timer_t timer_id),
++ TP_ARGS(timer_id),
++ TP_STRUCT__entry(__field(timer_t, timer_id)),
++ TP_fast_assign(tp_assign(timer_id, timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_unshare
++SC_TRACE_EVENT(sys_unshare,
++ TP_PROTO(unsigned long unshare_flags),
++ TP_ARGS(unshare_flags),
++ TP_STRUCT__entry(__field(unsigned long, unshare_flags)),
++ TP_fast_assign(tp_assign(unshare_flags, unshare_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_eventfd
++SC_TRACE_EVENT(sys_eventfd,
++ TP_PROTO(unsigned int count),
++ TP_ARGS(count),
++ TP_STRUCT__entry(__field(unsigned int, count)),
++ TP_fast_assign(tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_create1
++SC_TRACE_EVENT(sys_epoll_create1,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_init1
++SC_TRACE_EVENT(sys_inotify_init1,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_syncfs
++SC_TRACE_EVENT(sys_syncfs,
++ TP_PROTO(int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_kill
++SC_TRACE_EVENT(sys_kill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_signal
++SC_TRACE_EVENT(sys_signal,
++ TP_PROTO(int sig, __sighandler_t handler),
++ TP_ARGS(sig, handler),
++ TP_STRUCT__entry(__field(int, sig) __field(__sighandler_t, handler)),
++ TP_fast_assign(tp_assign(sig, sig) tp_assign(handler, handler)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setpgid
++SC_TRACE_EVENT(sys_setpgid,
++ TP_PROTO(pid_t pid, pid_t pgid),
++ TP_ARGS(pid, pgid),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, pgid)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(pgid, pgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup2
++SC_TRACE_EVENT(sys_dup2,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd),
++ TP_ARGS(oldfd, newfd),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setreuid
++SC_TRACE_EVENT(sys_setreuid,
++ TP_PROTO(uid_t ruid, uid_t euid),
++ TP_ARGS(ruid, euid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setregid
++SC_TRACE_EVENT(sys_setregid,
++ TP_PROTO(gid_t rgid, gid_t egid),
++ TP_ARGS(rgid, egid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_munmap
++SC_TRACE_EVENT(sys_munmap,
++ TP_PROTO(unsigned long addr, size_t len),
++ TP_ARGS(addr, len),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ftruncate
++SC_TRACE_EVENT(sys_ftruncate,
++ TP_PROTO(unsigned int fd, unsigned long length),
++ TP_ARGS(fd, length),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, length)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchmod
++SC_TRACE_EVENT(sys_fchmod,
++ TP_PROTO(unsigned int fd, mode_t mode),
++ TP_ARGS(fd, mode),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(mode_t, mode)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpriority
++SC_TRACE_EVENT(sys_getpriority,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_bdflush
++SC_TRACE_EVENT(sys_bdflush,
++ TP_PROTO(int func, long data),
++ TP_ARGS(func, data),
++ TP_STRUCT__entry(__field(int, func) __field(long, data)),
++ TP_fast_assign(tp_assign(func, func) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_flock
++SC_TRACE_EVENT(sys_flock,
++ TP_PROTO(unsigned int fd, unsigned int cmd),
++ TP_ARGS(fd, cmd),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mlock
++SC_TRACE_EVENT(sys_mlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_munlock
++SC_TRACE_EVENT(sys_munlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_tkill
++SC_TRACE_EVENT(sys_tkill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ioprio_get
++SC_TRACE_EVENT(sys_ioprio_get,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_rm_watch
++SC_TRACE_EVENT(sys_inotify_rm_watch,
++ TP_PROTO(int fd, __s32 wd),
++ TP_ARGS(fd, wd),
++ TP_STRUCT__entry(__field(int, fd) __field(__s32, wd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(wd, wd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timerfd_create
++SC_TRACE_EVENT(sys_timerfd_create,
++ TP_PROTO(int clockid, int flags),
++ TP_ARGS(clockid, flags),
++ TP_STRUCT__entry(__field(int, clockid) __field(int, flags)),
++ TP_fast_assign(tp_assign(clockid, clockid) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_eventfd2
++SC_TRACE_EVENT(sys_eventfd2,
++ TP_PROTO(unsigned int count, int flags),
++ TP_ARGS(count, flags),
++ TP_STRUCT__entry(__field(unsigned int, count) __field(int, flags)),
++ TP_fast_assign(tp_assign(count, count) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_listen
++SC_TRACE_EVENT(sys_listen,
++ TP_PROTO(int fd, int backlog),
++ TP_ARGS(fd, backlog),
++ TP_STRUCT__entry(__field(int, fd) __field(int, backlog)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(backlog, backlog)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_shutdown
++SC_TRACE_EVENT(sys_shutdown,
++ TP_PROTO(int fd, int how),
++ TP_ARGS(fd, how),
++ TP_STRUCT__entry(__field(int, fd) __field(int, how)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(how, how)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setns
++SC_TRACE_EVENT(sys_setns,
++ TP_PROTO(int fd, int nstype),
++ TP_ARGS(fd, nstype),
++ TP_STRUCT__entry(__field(int, fd) __field(int, nstype)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(nstype, nstype)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lseek
++SC_TRACE_EVENT(sys_lseek,
++ TP_PROTO(unsigned int fd, off_t offset, unsigned int origin),
++ TP_ARGS(fd, offset, origin),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(off_t, offset) __field(unsigned int, origin)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(origin, origin)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ioctl
++SC_TRACE_EVENT(sys_ioctl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fcntl
++SC_TRACE_EVENT(sys_fcntl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchown
++SC_TRACE_EVENT(sys_fchown,
++ TP_PROTO(unsigned int fd, uid_t user, gid_t group),
++ TP_ARGS(fd, user, group),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setpriority
++SC_TRACE_EVENT(sys_setpriority,
++ TP_PROTO(int which, int who, int niceval),
++ TP_ARGS(which, who, niceval),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, niceval)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(niceval, niceval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mprotect
++SC_TRACE_EVENT(sys_mprotect,
++ TP_PROTO(unsigned long start, size_t len, unsigned long prot),
++ TP_ARGS(start, len, prot),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(unsigned long, prot)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(prot, prot)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysfs
++SC_TRACE_EVENT(sys_sysfs,
++ TP_PROTO(int option, unsigned long arg1, unsigned long arg2),
++ TP_ARGS(option, arg1, arg2),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg1) __field(unsigned long, arg2)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg1, arg1) tp_assign(arg2, arg2)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_msync
++SC_TRACE_EVENT(sys_msync,
++ TP_PROTO(unsigned long start, size_t len, int flags),
++ TP_ARGS(start, len, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(int, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setresuid
++SC_TRACE_EVENT(sys_setresuid,
++ TP_PROTO(uid_t ruid, uid_t euid, uid_t suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid) __field(uid_t, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setresgid
++SC_TRACE_EVENT(sys_setresgid,
++ TP_PROTO(gid_t rgid, gid_t egid, gid_t sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid) __field(gid_t, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fcntl64
++SC_TRACE_EVENT(sys_fcntl64,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_madvise
++SC_TRACE_EVENT(sys_madvise,
++ TP_PROTO(unsigned long start, size_t len_in, int behavior),
++ TP_ARGS(start, len_in, behavior),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len_in) __field(int, behavior)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len_in, len_in) tp_assign(behavior, behavior)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_tgkill
++SC_TRACE_EVENT(sys_tgkill,
++ TP_PROTO(pid_t tgid, pid_t pid, int sig),
++ TP_ARGS(tgid, pid, sig),
++ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ioprio_set
++SC_TRACE_EVENT(sys_ioprio_set,
++ TP_PROTO(int which, int who, int ioprio),
++ TP_ARGS(which, who, ioprio),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, ioprio)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(ioprio, ioprio)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup3
++SC_TRACE_EVENT(sys_dup3,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd, int flags),
++ TP_ARGS(oldfd, newfd, flags),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd) __field(int, flags)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_socket
++SC_TRACE_EVENT(sys_socket,
++ TP_PROTO(int family, int type, int protocol),
++ TP_ARGS(family, type, protocol),
++ TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol)),
++ TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ptrace
++SC_TRACE_EVENT(sys_ptrace,
++ TP_PROTO(long request, long pid, unsigned long addr, unsigned long data),
++ TP_ARGS(request, pid, addr, data),
++ TP_STRUCT__entry(__field(long, request) __field(long, pid) __field_hex(unsigned long, addr) __field(unsigned long, data)),
++ TP_fast_assign(tp_assign(request, request) tp_assign(pid, pid) tp_assign(addr, addr) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_tee
++SC_TRACE_EVENT(sys_tee,
++ TP_PROTO(int fdin, int fdout, size_t len, unsigned int flags),
++ TP_ARGS(fdin, fdout, len, flags),
++ TP_STRUCT__entry(__field(int, fdin) __field(int, fdout) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fdin, fdin) tp_assign(fdout, fdout) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mremap
++SC_TRACE_EVENT(sys_mremap,
++ TP_PROTO(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr),
++ TP_ARGS(addr, old_len, new_len, flags, new_addr),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, old_len) __field(unsigned long, new_len) __field(unsigned long, flags) __field_hex(unsigned long, new_addr)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(old_len, old_len) tp_assign(new_len, new_len) tp_assign(flags, flags) tp_assign(new_addr, new_addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_prctl
++SC_TRACE_EVENT(sys_prctl,
++ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
++ TP_ARGS(option, arg2, arg3, arg4, arg5),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_remap_file_pages
++SC_TRACE_EVENT(sys_remap_file_pages,
++ TP_PROTO(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags),
++ TP_ARGS(start, size, prot, pgoff, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(unsigned long, size) __field(unsigned long, prot) __field(unsigned long, pgoff) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(size, size) tp_assign(prot, prot) tp_assign(pgoff, pgoff) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_keyctl
++SC_TRACE_EVENT(sys_keyctl,
++ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
++ TP_ARGS(option, arg2, arg3, arg4, arg5),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_INTEGERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "powerpc-32-syscalls-3.0.34_integers_override.h"
++#include "syscalls_integers_override.h"
++
++#ifndef OVERRIDE_TABLE_32_sys_restart_syscall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_restart_syscall, 0, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpid, 20, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid, 24, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pause
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_pause, 29, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sync
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sync, 36, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid, 47, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_geteuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid, 49, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getegid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid, 50, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getppid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getppid, 64, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpgrp
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpgrp, 65, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setsid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_setsid, 66, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sgetmask
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sgetmask, 68, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_vhangup
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_vhangup, 111, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munlockall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_munlockall, 153, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_yield
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sched_yield, 158, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_gettid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_gettid, 207, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_init
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_inotify_init, 275, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_exit
++TRACE_SYSCALL_TABLE(sys_exit, sys_exit, 1, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_close
++TRACE_SYSCALL_TABLE(sys_close, sys_close, 6, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lseek
++TRACE_SYSCALL_TABLE(sys_lseek, sys_lseek, 19, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setuid
++TRACE_SYSCALL_TABLE(sys_setuid, sys_setuid, 23, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ptrace
++TRACE_SYSCALL_TABLE(sys_ptrace, sys_ptrace, 26, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_alarm
++TRACE_SYSCALL_TABLE(sys_alarm, sys_alarm, 27, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_nice
++TRACE_SYSCALL_TABLE(sys_nice, sys_nice, 34, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_kill
++TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 37, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup
++TRACE_SYSCALL_TABLE(sys_dup, sys_dup, 41, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_brk
++TRACE_SYSCALL_TABLE(sys_brk, sys_brk, 45, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgid
++TRACE_SYSCALL_TABLE(sys_setgid, sys_setgid, 46, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_signal
++TRACE_SYSCALL_TABLE(sys_signal, sys_signal, 48, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ioctl
++TRACE_SYSCALL_TABLE(sys_ioctl, sys_ioctl, 54, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fcntl
++TRACE_SYSCALL_TABLE(sys_fcntl, sys_fcntl, 55, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setpgid
++TRACE_SYSCALL_TABLE(sys_setpgid, sys_setpgid, 57, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_umask
++TRACE_SYSCALL_TABLE(sys_umask, sys_umask, 60, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup2
++TRACE_SYSCALL_TABLE(sys_dup2, sys_dup2, 63, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ssetmask
++TRACE_SYSCALL_TABLE(sys_ssetmask, sys_ssetmask, 69, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setreuid
++TRACE_SYSCALL_TABLE(sys_setreuid, sys_setreuid, 70, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setregid
++TRACE_SYSCALL_TABLE(sys_setregid, sys_setregid, 71, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munmap
++TRACE_SYSCALL_TABLE(sys_munmap, sys_munmap, 91, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ftruncate
++TRACE_SYSCALL_TABLE(sys_ftruncate, sys_ftruncate, 93, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchmod
++TRACE_SYSCALL_TABLE(sys_fchmod, sys_fchmod, 94, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchown
++TRACE_SYSCALL_TABLE(sys_fchown, sys_fchown, 95, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpriority
++TRACE_SYSCALL_TABLE(sys_getpriority, sys_getpriority, 96, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setpriority
++TRACE_SYSCALL_TABLE(sys_setpriority, sys_setpriority, 97, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fsync
++TRACE_SYSCALL_TABLE(sys_fsync, sys_fsync, 118, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mprotect
++TRACE_SYSCALL_TABLE(sys_mprotect, sys_mprotect, 125, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpgid
++TRACE_SYSCALL_TABLE(sys_getpgid, sys_getpgid, 132, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchdir
++TRACE_SYSCALL_TABLE(sys_fchdir, sys_fchdir, 133, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_bdflush
++TRACE_SYSCALL_TABLE(sys_bdflush, sys_bdflush, 134, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysfs
++TRACE_SYSCALL_TABLE(sys_sysfs, sys_sysfs, 135, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_personality
++TRACE_SYSCALL_TABLE(sys_personality, sys_personality, 136, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsuid
++TRACE_SYSCALL_TABLE(sys_setfsuid, sys_setfsuid, 138, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsgid
++TRACE_SYSCALL_TABLE(sys_setfsgid, sys_setfsgid, 139, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_flock
++TRACE_SYSCALL_TABLE(sys_flock, sys_flock, 143, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_msync
++TRACE_SYSCALL_TABLE(sys_msync, sys_msync, 144, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getsid
++TRACE_SYSCALL_TABLE(sys_getsid, sys_getsid, 147, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fdatasync
++TRACE_SYSCALL_TABLE(sys_fdatasync, sys_fdatasync, 148, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mlock
++TRACE_SYSCALL_TABLE(sys_mlock, sys_mlock, 150, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munlock
++TRACE_SYSCALL_TABLE(sys_munlock, sys_munlock, 151, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mlockall
++TRACE_SYSCALL_TABLE(sys_mlockall, sys_mlockall, 152, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getscheduler
++TRACE_SYSCALL_TABLE(sys_sched_getscheduler, sys_sched_getscheduler, 157, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_max
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_max, sys_sched_get_priority_max, 159, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_min
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_min, sys_sched_get_priority_min, 160, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mremap
++TRACE_SYSCALL_TABLE(sys_mremap, sys_mremap, 163, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setresuid
++TRACE_SYSCALL_TABLE(sys_setresuid, sys_setresuid, 164, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setresgid
++TRACE_SYSCALL_TABLE(sys_setresgid, sys_setresgid, 169, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_prctl
++TRACE_SYSCALL_TABLE(sys_prctl, sys_prctl, 171, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fcntl64
++TRACE_SYSCALL_TABLE(sys_fcntl64, sys_fcntl64, 204, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_madvise
++TRACE_SYSCALL_TABLE(sys_madvise, sys_madvise, 205, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_tkill
++TRACE_SYSCALL_TABLE(sys_tkill, sys_tkill, 208, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_destroy
++TRACE_SYSCALL_TABLE(sys_io_destroy, sys_io_destroy, 228, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_exit_group
++TRACE_SYSCALL_TABLE(sys_exit_group, sys_exit_group, 234, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_create
++TRACE_SYSCALL_TABLE(sys_epoll_create, sys_epoll_create, 236, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_remap_file_pages
++TRACE_SYSCALL_TABLE(sys_remap_file_pages, sys_remap_file_pages, 239, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_getoverrun
++TRACE_SYSCALL_TABLE(sys_timer_getoverrun, sys_timer_getoverrun, 243, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_delete
++TRACE_SYSCALL_TABLE(sys_timer_delete, sys_timer_delete, 244, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_tgkill
++TRACE_SYSCALL_TABLE(sys_tgkill, sys_tgkill, 250, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_keyctl
++TRACE_SYSCALL_TABLE(sys_keyctl, sys_keyctl, 271, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ioprio_set
++TRACE_SYSCALL_TABLE(sys_ioprio_set, sys_ioprio_set, 273, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ioprio_get
++TRACE_SYSCALL_TABLE(sys_ioprio_get, sys_ioprio_get, 274, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_rm_watch
++TRACE_SYSCALL_TABLE(sys_inotify_rm_watch, sys_inotify_rm_watch, 277, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_unshare
++TRACE_SYSCALL_TABLE(sys_unshare, sys_unshare, 282, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_tee
++TRACE_SYSCALL_TABLE(sys_tee, sys_tee, 284, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timerfd_create
++TRACE_SYSCALL_TABLE(sys_timerfd_create, sys_timerfd_create, 306, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_eventfd
++TRACE_SYSCALL_TABLE(sys_eventfd, sys_eventfd, 307, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_eventfd2
++TRACE_SYSCALL_TABLE(sys_eventfd2, sys_eventfd2, 314, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_create1
++TRACE_SYSCALL_TABLE(sys_epoll_create1, sys_epoll_create1, 315, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup3
++TRACE_SYSCALL_TABLE(sys_dup3, sys_dup3, 316, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_init1
++TRACE_SYSCALL_TABLE(sys_inotify_init1, sys_inotify_init1, 318, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_socket
++TRACE_SYSCALL_TABLE(sys_socket, sys_socket, 326, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_listen
++TRACE_SYSCALL_TABLE(sys_listen, sys_listen, 329, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_shutdown
++TRACE_SYSCALL_TABLE(sys_shutdown, sys_shutdown, 338, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_syncfs
++TRACE_SYSCALL_TABLE(sys_syncfs, sys_syncfs, 348, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setns
++TRACE_SYSCALL_TABLE(sys_setns, sys_setns, 350, 2)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/powerpc-32-syscalls-3.0.34_integers_override.h
+@@ -0,0 +1,9 @@
++#ifndef CREATE_SYSCALL_TABLE
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#define OVERRIDE_TABLE_32_sys_mmap
++TRACE_SYSCALL_TABLE(sys_mmap, sys_mmap, 90, 6)
++
++#endif /* CREATE_SYSCALL_TABLE */
++
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/powerpc-32-syscalls-3.0.34_pointers.h
+@@ -0,0 +1,2316 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_POINTERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_POINTERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "powerpc-32-syscalls-3.0.34_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_32_sys_unlink
++SC_TRACE_EVENT(sys_unlink,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chdir
++SC_TRACE_EVENT(sys_chdir,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_time
++SC_TRACE_EVENT(sys_time,
++ TP_PROTO(time_t * tloc),
++ TP_ARGS(tloc),
++ TP_STRUCT__entry(__field_hex(time_t *, tloc)),
++ TP_fast_assign(tp_assign(tloc, tloc)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_oldumount
++SC_TRACE_EVENT(sys_oldumount,
++ TP_PROTO(char * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_stime
++SC_TRACE_EVENT(sys_stime,
++ TP_PROTO(time_t * tptr),
++ TP_ARGS(tptr),
++ TP_STRUCT__entry(__field_hex(time_t *, tptr)),
++ TP_fast_assign(tp_assign(tptr, tptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rmdir
++SC_TRACE_EVENT(sys_rmdir,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pipe
++SC_TRACE_EVENT(sys_pipe,
++ TP_PROTO(int * fildes),
++ TP_ARGS(fildes),
++ TP_STRUCT__entry(__field_hex(int *, fildes)),
++ TP_fast_assign(tp_assign(fildes, fildes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_times
++SC_TRACE_EVENT(sys_times,
++ TP_PROTO(struct tms * tbuf),
++ TP_ARGS(tbuf),
++ TP_STRUCT__entry(__field_hex(struct tms *, tbuf)),
++ TP_fast_assign(tp_assign(tbuf, tbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_acct
++SC_TRACE_EVENT(sys_acct,
++ TP_PROTO(const char * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_olduname
++SC_TRACE_EVENT(sys_olduname,
++ TP_PROTO(struct oldold_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct oldold_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chroot
++SC_TRACE_EVENT(sys_chroot,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sigpending
++SC_TRACE_EVENT(sys_sigpending,
++ TP_PROTO(old_sigset_t * set),
++ TP_ARGS(set),
++ TP_STRUCT__entry(__field_hex(old_sigset_t *, set)),
++ TP_fast_assign(tp_assign(set, set)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_uselib
++SC_TRACE_EVENT(sys_uselib,
++ TP_PROTO(const char * library),
++ TP_ARGS(library),
++ TP_STRUCT__entry(__field_hex(const char *, library)),
++ TP_fast_assign(tp_assign(library, library)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_uname
++SC_TRACE_EVENT(sys_uname,
++ TP_PROTO(struct old_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct old_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_swapoff
++SC_TRACE_EVENT(sys_swapoff,
++ TP_PROTO(const char * specialfile),
++ TP_ARGS(specialfile),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysinfo
++SC_TRACE_EVENT(sys_sysinfo,
++ TP_PROTO(struct sysinfo * info),
++ TP_ARGS(info),
++ TP_STRUCT__entry(__field_hex(struct sysinfo *, info)),
++ TP_fast_assign(tp_assign(info, info)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newuname
++SC_TRACE_EVENT(sys_newuname,
++ TP_PROTO(struct new_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct new_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_adjtimex
++SC_TRACE_EVENT(sys_adjtimex,
++ TP_PROTO(struct timex * txc_p),
++ TP_ARGS(txc_p),
++ TP_STRUCT__entry(__field_hex(struct timex *, txc_p)),
++ TP_fast_assign(tp_assign(txc_p, txc_p)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysctl
++SC_TRACE_EVENT(sys_sysctl,
++ TP_PROTO(struct __sysctl_args * args),
++ TP_ARGS(args),
++ TP_STRUCT__entry(__field_hex(struct __sysctl_args *, args)),
++ TP_fast_assign(tp_assign(args, args)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_set_tid_address
++SC_TRACE_EVENT(sys_set_tid_address,
++ TP_PROTO(int * tidptr),
++ TP_ARGS(tidptr),
++ TP_STRUCT__entry(__field_hex(int *, tidptr)),
++ TP_fast_assign(tp_assign(tidptr, tidptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_unlink
++SC_TRACE_EVENT(sys_mq_unlink,
++ TP_PROTO(const char * u_name),
++ TP_ARGS(u_name),
++ TP_STRUCT__entry(__string_from_user(u_name, u_name)),
++ TP_fast_assign(tp_copy_string_from_user(u_name, u_name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_creat
++SC_TRACE_EVENT(sys_creat,
++ TP_PROTO(const char * pathname, int mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_link
++SC_TRACE_EVENT(sys_link,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chmod
++SC_TRACE_EVENT(sys_chmod,
++ TP_PROTO(const char * filename, mode_t mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(mode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_stat
++SC_TRACE_EVENT(sys_stat,
++ TP_PROTO(const char * filename, struct __old_kernel_stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct __old_kernel_stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstat
++SC_TRACE_EVENT(sys_fstat,
++ TP_PROTO(unsigned int fd, struct __old_kernel_stat * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct __old_kernel_stat *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_utime
++SC_TRACE_EVENT(sys_utime,
++ TP_PROTO(char * filename, struct utimbuf * times),
++ TP_ARGS(filename, times),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct utimbuf *, times)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(times, times)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_access
++SC_TRACE_EVENT(sys_access,
++ TP_PROTO(const char * filename, int mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rename
++SC_TRACE_EVENT(sys_rename,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mkdir
++SC_TRACE_EVENT(sys_mkdir,
++ TP_PROTO(const char * pathname, int mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_umount
++SC_TRACE_EVENT(sys_umount,
++ TP_PROTO(char * name, int flags),
++ TP_ARGS(name, flags),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ustat
++SC_TRACE_EVENT(sys_ustat,
++ TP_PROTO(unsigned dev, struct ustat * ubuf),
++ TP_ARGS(dev, ubuf),
++ TP_STRUCT__entry(__field(unsigned, dev) __field_hex(struct ustat *, ubuf)),
++ TP_fast_assign(tp_assign(dev, dev) tp_assign(ubuf, ubuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sethostname
++SC_TRACE_EVENT(sys_sethostname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setrlimit
++SC_TRACE_EVENT(sys_setrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_old_getrlimit
++SC_TRACE_EVENT(sys_old_getrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getrusage
++SC_TRACE_EVENT(sys_getrusage,
++ TP_PROTO(int who, struct rusage * ru),
++ TP_ARGS(who, ru),
++ TP_STRUCT__entry(__field(int, who) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(who, who) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_gettimeofday
++SC_TRACE_EVENT(sys_gettimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_settimeofday
++SC_TRACE_EVENT(sys_settimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getgroups
++SC_TRACE_EVENT(sys_getgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgroups
++SC_TRACE_EVENT(sys_setgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_symlink
++SC_TRACE_EVENT(sys_symlink,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lstat
++SC_TRACE_EVENT(sys_lstat,
++ TP_PROTO(const char * filename, struct __old_kernel_stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct __old_kernel_stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_swapon
++SC_TRACE_EVENT(sys_swapon,
++ TP_PROTO(const char * specialfile, int swap_flags),
++ TP_ARGS(specialfile, swap_flags),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile) __field(int, swap_flags)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile) tp_assign(swap_flags, swap_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_truncate
++SC_TRACE_EVENT(sys_truncate,
++ TP_PROTO(const char * path, long length),
++ TP_ARGS(path, length),
++ TP_STRUCT__entry(__string_from_user(path, path) __field(long, length)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_statfs
++SC_TRACE_EVENT(sys_statfs,
++ TP_PROTO(const char * pathname, struct statfs * buf),
++ TP_ARGS(pathname, buf),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstatfs
++SC_TRACE_EVENT(sys_fstatfs,
++ TP_PROTO(unsigned int fd, struct statfs * buf),
++ TP_ARGS(fd, buf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_socketcall
++SC_TRACE_EVENT(sys_socketcall,
++ TP_PROTO(int call, unsigned long * args),
++ TP_ARGS(call, args),
++ TP_STRUCT__entry(__field(int, call) __field_hex(unsigned long *, args)),
++ TP_fast_assign(tp_assign(call, call) tp_assign(args, args)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getitimer
++SC_TRACE_EVENT(sys_getitimer,
++ TP_PROTO(int which, struct itimerval * value),
++ TP_ARGS(which, value),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newstat
++SC_TRACE_EVENT(sys_newstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newlstat
++SC_TRACE_EVENT(sys_newlstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newfstat
++SC_TRACE_EVENT(sys_newfstat,
++ TP_PROTO(unsigned int fd, struct stat * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setdomainname
++SC_TRACE_EVENT(sys_setdomainname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_delete_module
++SC_TRACE_EVENT(sys_delete_module,
++ TP_PROTO(const char * name_user, unsigned int flags),
++ TP_ARGS(name_user, flags),
++ TP_STRUCT__entry(__string_from_user(name_user, name_user) __field(unsigned int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name_user, name_user) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setparam
++SC_TRACE_EVENT(sys_sched_setparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getparam
++SC_TRACE_EVENT(sys_sched_getparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_rr_get_interval
++SC_TRACE_EVENT(sys_sched_rr_get_interval,
++ TP_PROTO(pid_t pid, struct timespec * interval),
++ TP_ARGS(pid, interval),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct timespec *, interval)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_nanosleep
++SC_TRACE_EVENT(sys_nanosleep,
++ TP_PROTO(struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(rqtp, rmtp),
++ TP_STRUCT__entry(__field_hex(struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigpending
++SC_TRACE_EVENT(sys_rt_sigpending,
++ TP_PROTO(sigset_t * set, size_t sigsetsize),
++ TP_ARGS(set, sigsetsize),
++ TP_STRUCT__entry(__field_hex(sigset_t *, set) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(set, set) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigsuspend
++SC_TRACE_EVENT(sys_rt_sigsuspend,
++ TP_PROTO(sigset_t * unewset, size_t sigsetsize),
++ TP_ARGS(unewset, sigsetsize),
++ TP_STRUCT__entry(__field_hex(sigset_t *, unewset) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(unewset, unewset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getcwd
++SC_TRACE_EVENT(sys_getcwd,
++ TP_PROTO(char * buf, unsigned long size),
++ TP_ARGS(buf, size),
++ TP_STRUCT__entry(__field_hex(char *, buf) __field(unsigned long, size)),
++ TP_fast_assign(tp_assign(buf, buf) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getrlimit
++SC_TRACE_EVENT(sys_getrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_stat64
++SC_TRACE_EVENT(sys_stat64,
++ TP_PROTO(const char * filename, struct stat64 * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lstat64
++SC_TRACE_EVENT(sys_lstat64,
++ TP_PROTO(const char * filename, struct stat64 * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstat64
++SC_TRACE_EVENT(sys_fstat64,
++ TP_PROTO(unsigned long fd, struct stat64 * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(struct stat64 *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pivot_root
++SC_TRACE_EVENT(sys_pivot_root,
++ TP_PROTO(const char * new_root, const char * put_old),
++ TP_ARGS(new_root, put_old),
++ TP_STRUCT__entry(__string_from_user(new_root, new_root) __string_from_user(put_old, put_old)),
++ TP_fast_assign(tp_copy_string_from_user(new_root, new_root) tp_copy_string_from_user(put_old, put_old)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_removexattr
++SC_TRACE_EVENT(sys_removexattr,
++ TP_PROTO(const char * pathname, const char * name),
++ TP_ARGS(pathname, name),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lremovexattr
++SC_TRACE_EVENT(sys_lremovexattr,
++ TP_PROTO(const char * pathname, const char * name),
++ TP_ARGS(pathname, name),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fremovexattr
++SC_TRACE_EVENT(sys_fremovexattr,
++ TP_PROTO(int fd, const char * name),
++ TP_ARGS(fd, name),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_setup
++SC_TRACE_EVENT(sys_io_setup,
++ TP_PROTO(unsigned nr_events, aio_context_t * ctxp),
++ TP_ARGS(nr_events, ctxp),
++ TP_STRUCT__entry(__field(unsigned, nr_events) __field_hex(aio_context_t *, ctxp)),
++ TP_fast_assign(tp_assign(nr_events, nr_events) tp_assign(ctxp, ctxp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_gettime
++SC_TRACE_EVENT(sys_timer_gettime,
++ TP_PROTO(timer_t timer_id, struct itimerspec * setting),
++ TP_ARGS(timer_id, setting),
++ TP_STRUCT__entry(__field(timer_t, timer_id) __field_hex(struct itimerspec *, setting)),
++ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(setting, setting)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_settime
++SC_TRACE_EVENT(sys_clock_settime,
++ TP_PROTO(const clockid_t which_clock, const struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(const struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_gettime
++SC_TRACE_EVENT(sys_clock_gettime,
++ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_getres
++SC_TRACE_EVENT(sys_clock_getres,
++ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_utimes
++SC_TRACE_EVENT(sys_utimes,
++ TP_PROTO(char * filename, struct timeval * utimes),
++ TP_ARGS(filename, utimes),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_notify
++SC_TRACE_EVENT(sys_mq_notify,
++ TP_PROTO(mqd_t mqdes, const struct sigevent * u_notification),
++ TP_ARGS(mqdes, u_notification),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct sigevent *, u_notification)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_notification, u_notification)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_set_robust_list
++SC_TRACE_EVENT(sys_set_robust_list,
++ TP_PROTO(struct robust_list_head * head, size_t len),
++ TP_ARGS(head, len),
++ TP_STRUCT__entry(__field_hex(struct robust_list_head *, head) __field(size_t, len)),
++ TP_fast_assign(tp_assign(head, head) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timerfd_gettime
++SC_TRACE_EVENT(sys_timerfd_gettime,
++ TP_PROTO(int ufd, struct itimerspec * otmr),
++ TP_ARGS(ufd, otmr),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(struct itimerspec *, otmr)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(otmr, otmr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pipe2
++SC_TRACE_EVENT(sys_pipe2,
++ TP_PROTO(int * fildes, int flags),
++ TP_ARGS(fildes, flags),
++ TP_STRUCT__entry(__field_hex(int *, fildes) __field(int, flags)),
++ TP_fast_assign(tp_assign(fildes, fildes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_adjtime
++SC_TRACE_EVENT(sys_clock_adjtime,
++ TP_PROTO(const clockid_t which_clock, struct timex * utx),
++ TP_ARGS(which_clock, utx),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timex *, utx)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(utx, utx)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_read
++SC_TRACE_EVENT(sys_read,
++ TP_PROTO(unsigned int fd, char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_write
++SC_TRACE_EVENT(sys_write,
++ TP_PROTO(unsigned int fd, const char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_open
++SC_TRACE_EVENT(sys_open,
++ TP_PROTO(const char * filename, int flags, int mode),
++ TP_ARGS(filename, flags, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_waitpid
++SC_TRACE_EVENT(sys_waitpid,
++ TP_PROTO(pid_t pid, int * stat_addr, int options),
++ TP_ARGS(pid, stat_addr, options),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(int *, stat_addr) __field(int, options)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(stat_addr, stat_addr) tp_assign(options, options)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mknod
++SC_TRACE_EVENT(sys_mknod,
++ TP_PROTO(const char * filename, int mode, unsigned dev),
++ TP_ARGS(filename, mode, dev),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lchown
++SC_TRACE_EVENT(sys_lchown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readlink
++SC_TRACE_EVENT(sys_readlink,
++ TP_PROTO(const char * path, char * buf, int bufsiz),
++ TP_ARGS(path, buf, bufsiz),
++ TP_STRUCT__entry(__string_from_user(path, path) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_old_readdir
++SC_TRACE_EVENT(sys_old_readdir,
++ TP_PROTO(unsigned int fd, struct old_linux_dirent * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct old_linux_dirent *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_syslog
++SC_TRACE_EVENT(sys_syslog,
++ TP_PROTO(int type, char * buf, int len),
++ TP_ARGS(type, buf, len),
++ TP_STRUCT__entry(__field(int, type) __field_hex(char *, buf) __field(int, len)),
++ TP_fast_assign(tp_assign(type, type) tp_assign(buf, buf) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setitimer
++SC_TRACE_EVENT(sys_setitimer,
++ TP_PROTO(int which, struct itimerval * value, struct itimerval * ovalue),
++ TP_ARGS(which, value, ovalue),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value) __field_hex(struct itimerval *, ovalue)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value) tp_assign(ovalue, ovalue)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sigprocmask
++SC_TRACE_EVENT(sys_sigprocmask,
++ TP_PROTO(int how, old_sigset_t * nset, old_sigset_t * oset),
++ TP_ARGS(how, nset, oset),
++ TP_STRUCT__entry(__field(int, how) __field_hex(old_sigset_t *, nset) __field_hex(old_sigset_t *, oset)),
++ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_init_module
++SC_TRACE_EVENT(sys_init_module,
++ TP_PROTO(void * umod, unsigned long len, const char * uargs),
++ TP_ARGS(umod, len, uargs),
++ TP_STRUCT__entry(__field_hex(void *, umod) __field(unsigned long, len) __field_hex(const char *, uargs)),
++ TP_fast_assign(tp_assign(umod, umod) tp_assign(len, len) tp_assign(uargs, uargs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getdents
++SC_TRACE_EVENT(sys_getdents,
++ TP_PROTO(unsigned int fd, struct linux_dirent * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readv
++SC_TRACE_EVENT(sys_readv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_writev
++SC_TRACE_EVENT(sys_writev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setscheduler
++SC_TRACE_EVENT(sys_sched_setscheduler,
++ TP_PROTO(pid_t pid, int policy, struct sched_param * param),
++ TP_ARGS(pid, policy, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, policy) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(policy, policy) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getresuid
++SC_TRACE_EVENT(sys_getresuid,
++ TP_PROTO(uid_t * ruid, uid_t * euid, uid_t * suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field_hex(uid_t *, ruid) __field_hex(uid_t *, euid) __field_hex(uid_t *, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_poll
++SC_TRACE_EVENT(sys_poll,
++ TP_PROTO(struct pollfd * ufds, unsigned int nfds, long timeout_msecs),
++ TP_ARGS(ufds, nfds, timeout_msecs),
++ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field(long, timeout_msecs)),
++ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(timeout_msecs, timeout_msecs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_nfsservctl
++SC_TRACE_EVENT(sys_nfsservctl,
++ TP_PROTO(int cmd, struct nfsctl_arg * arg, void * res),
++ TP_ARGS(cmd, arg, res),
++ TP_STRUCT__entry(__field(int, cmd) __field_hex(struct nfsctl_arg *, arg) __field_hex(void *, res)),
++ TP_fast_assign(tp_assign(cmd, cmd) tp_assign(arg, arg) tp_assign(res, res)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getresgid
++SC_TRACE_EVENT(sys_getresgid,
++ TP_PROTO(gid_t * rgid, gid_t * egid, gid_t * sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field_hex(gid_t *, rgid) __field_hex(gid_t *, egid) __field_hex(gid_t *, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigqueueinfo
++SC_TRACE_EVENT(sys_rt_sigqueueinfo,
++ TP_PROTO(pid_t pid, int sig, siginfo_t * uinfo),
++ TP_ARGS(pid, sig, uinfo),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chown
++SC_TRACE_EVENT(sys_chown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getdents64
++SC_TRACE_EVENT(sys_getdents64,
++ TP_PROTO(unsigned int fd, struct linux_dirent64 * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent64 *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mincore
++SC_TRACE_EVENT(sys_mincore,
++ TP_PROTO(unsigned long start, size_t len, unsigned char * vec),
++ TP_ARGS(start, len, vec),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field_hex(unsigned char *, vec)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(vec, vec)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_listxattr
++SC_TRACE_EVENT(sys_listxattr,
++ TP_PROTO(const char * pathname, char * list, size_t size),
++ TP_ARGS(pathname, list, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_llistxattr
++SC_TRACE_EVENT(sys_llistxattr,
++ TP_PROTO(const char * pathname, char * list, size_t size),
++ TP_ARGS(pathname, list, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_flistxattr
++SC_TRACE_EVENT(sys_flistxattr,
++ TP_PROTO(int fd, char * list, size_t size),
++ TP_ARGS(fd, list, size),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setaffinity
++SC_TRACE_EVENT(sys_sched_setaffinity,
++ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
++ TP_ARGS(pid, len, user_mask_ptr),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getaffinity
++SC_TRACE_EVENT(sys_sched_getaffinity,
++ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
++ TP_ARGS(pid, len, user_mask_ptr),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_submit
++SC_TRACE_EVENT(sys_io_submit,
++ TP_PROTO(aio_context_t ctx_id, long nr, struct iocb * * iocbpp),
++ TP_ARGS(ctx_id, nr, iocbpp),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, nr) __field_hex(struct iocb * *, iocbpp)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(nr, nr) tp_assign(iocbpp, iocbpp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_cancel
++SC_TRACE_EVENT(sys_io_cancel,
++ TP_PROTO(aio_context_t ctx_id, struct iocb * iocb, struct io_event * result),
++ TP_ARGS(ctx_id, iocb, result),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field_hex(struct iocb *, iocb) __field_hex(struct io_event *, result)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(iocb, iocb) tp_assign(result, result)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_create
++SC_TRACE_EVENT(sys_timer_create,
++ TP_PROTO(const clockid_t which_clock, struct sigevent * timer_event_spec, timer_t * created_timer_id),
++ TP_ARGS(which_clock, timer_event_spec, created_timer_id),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct sigevent *, timer_event_spec) __field_hex(timer_t *, created_timer_id)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(timer_event_spec, timer_event_spec) tp_assign(created_timer_id, created_timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_statfs64
++SC_TRACE_EVENT(sys_statfs64,
++ TP_PROTO(const char * pathname, size_t sz, struct statfs64 * buf),
++ TP_ARGS(pathname, sz, buf),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(size_t, sz) __field_hex(struct statfs64 *, buf)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(sz, sz) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstatfs64
++SC_TRACE_EVENT(sys_fstatfs64,
++ TP_PROTO(unsigned int fd, size_t sz, struct statfs64 * buf),
++ TP_ARGS(fd, sz, buf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(size_t, sz) __field_hex(struct statfs64 *, buf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(sz, sz) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_getsetattr
++SC_TRACE_EVENT(sys_mq_getsetattr,
++ TP_PROTO(mqd_t mqdes, const struct mq_attr * u_mqstat, struct mq_attr * u_omqstat),
++ TP_ARGS(mqdes, u_mqstat, u_omqstat),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct mq_attr *, u_mqstat) __field_hex(struct mq_attr *, u_omqstat)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_mqstat, u_mqstat) tp_assign(u_omqstat, u_omqstat)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_add_watch
++SC_TRACE_EVENT(sys_inotify_add_watch,
++ TP_PROTO(int fd, const char * pathname, u32 mask),
++ TP_ARGS(fd, pathname, mask),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(pathname, pathname) __field(u32, mask)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(pathname, pathname) tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mkdirat
++SC_TRACE_EVENT(sys_mkdirat,
++ TP_PROTO(int dfd, const char * pathname, int mode),
++ TP_ARGS(dfd, pathname, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_futimesat
++SC_TRACE_EVENT(sys_futimesat,
++ TP_PROTO(int dfd, const char * filename, struct timeval * utimes),
++ TP_ARGS(dfd, filename, utimes),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_unlinkat
++SC_TRACE_EVENT(sys_unlinkat,
++ TP_PROTO(int dfd, const char * pathname, int flag),
++ TP_ARGS(dfd, pathname, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_symlinkat
++SC_TRACE_EVENT(sys_symlinkat,
++ TP_PROTO(const char * oldname, int newdfd, const char * newname),
++ TP_ARGS(oldname, newdfd, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchmodat
++SC_TRACE_EVENT(sys_fchmodat,
++ TP_PROTO(int dfd, const char * filename, mode_t mode),
++ TP_ARGS(dfd, filename, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(mode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_faccessat
++SC_TRACE_EVENT(sys_faccessat,
++ TP_PROTO(int dfd, const char * filename, int mode),
++ TP_ARGS(dfd, filename, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_get_robust_list
++SC_TRACE_EVENT(sys_get_robust_list,
++ TP_PROTO(int pid, struct robust_list_head * * head_ptr, size_t * len_ptr),
++ TP_ARGS(pid, head_ptr, len_ptr),
++ TP_STRUCT__entry(__field(int, pid) __field_hex(struct robust_list_head * *, head_ptr) __field_hex(size_t *, len_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(head_ptr, head_ptr) tp_assign(len_ptr, len_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getcpu
++SC_TRACE_EVENT(sys_getcpu,
++ TP_PROTO(unsigned * cpup, unsigned * nodep, struct getcpu_cache * unused),
++ TP_ARGS(cpup, nodep, unused),
++ TP_STRUCT__entry(__field_hex(unsigned *, cpup) __field_hex(unsigned *, nodep) __field_hex(struct getcpu_cache *, unused)),
++ TP_fast_assign(tp_assign(cpup, cpup) tp_assign(nodep, nodep) tp_assign(unused, unused)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_signalfd
++SC_TRACE_EVENT(sys_signalfd,
++ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask),
++ TP_ARGS(ufd, user_mask, sizemask),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_bind
++SC_TRACE_EVENT(sys_bind,
++ TP_PROTO(int fd, struct sockaddr * umyaddr, int addrlen),
++ TP_ARGS(fd, umyaddr, addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, umyaddr) __field_hex(int, addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(umyaddr, umyaddr) tp_assign(addrlen, addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_connect
++SC_TRACE_EVENT(sys_connect,
++ TP_PROTO(int fd, struct sockaddr * uservaddr, int addrlen),
++ TP_ARGS(fd, uservaddr, addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, uservaddr) __field_hex(int, addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(uservaddr, uservaddr) tp_assign(addrlen, addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_accept
++SC_TRACE_EVENT(sys_accept,
++ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen),
++ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getsockname
++SC_TRACE_EVENT(sys_getsockname,
++ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
++ TP_ARGS(fd, usockaddr, usockaddr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpeername
++SC_TRACE_EVENT(sys_getpeername,
++ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
++ TP_ARGS(fd, usockaddr, usockaddr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendmsg
++SC_TRACE_EVENT(sys_sendmsg,
++ TP_PROTO(int fd, struct msghdr * msg, unsigned flags),
++ TP_ARGS(fd, msg, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_recvmsg
++SC_TRACE_EVENT(sys_recvmsg,
++ TP_PROTO(int fd, struct msghdr * msg, unsigned int flags),
++ TP_ARGS(fd, msg, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_reboot
++SC_TRACE_EVENT(sys_reboot,
++ TP_PROTO(int magic1, int magic2, unsigned int cmd, void * arg),
++ TP_ARGS(magic1, magic2, cmd, arg),
++ TP_STRUCT__entry(__field(int, magic1) __field(int, magic2) __field(unsigned int, cmd) __field_hex(void *, arg)),
++ TP_fast_assign(tp_assign(magic1, magic1) tp_assign(magic2, magic2) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_wait4
++SC_TRACE_EVENT(sys_wait4,
++ TP_PROTO(pid_t upid, int * stat_addr, int options, struct rusage * ru),
++ TP_ARGS(upid, stat_addr, options, ru),
++ TP_STRUCT__entry(__field(pid_t, upid) __field_hex(int *, stat_addr) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(upid, upid) tp_assign(stat_addr, stat_addr) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigaction
++SC_TRACE_EVENT(sys_rt_sigaction,
++ TP_PROTO(int sig, const struct sigaction * act, struct sigaction * oact, size_t sigsetsize),
++ TP_ARGS(sig, act, oact, sigsetsize),
++ TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction *, act) __field_hex(struct sigaction *, oact) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigprocmask
++SC_TRACE_EVENT(sys_rt_sigprocmask,
++ TP_PROTO(int how, sigset_t * nset, sigset_t * oset, size_t sigsetsize),
++ TP_ARGS(how, nset, oset, sigsetsize),
++ TP_STRUCT__entry(__field(int, how) __field_hex(sigset_t *, nset) __field_hex(sigset_t *, oset) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigtimedwait
++SC_TRACE_EVENT(sys_rt_sigtimedwait,
++ TP_PROTO(const sigset_t * uthese, siginfo_t * uinfo, const struct timespec * uts, size_t sigsetsize),
++ TP_ARGS(uthese, uinfo, uts, sigsetsize),
++ TP_STRUCT__entry(__field_hex(const sigset_t *, uthese) __field_hex(siginfo_t *, uinfo) __field_hex(const struct timespec *, uts) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(uthese, uthese) tp_assign(uinfo, uinfo) tp_assign(uts, uts) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendfile
++SC_TRACE_EVENT(sys_sendfile,
++ TP_PROTO(int out_fd, int in_fd, off_t * offset, size_t count),
++ TP_ARGS(out_fd, in_fd, offset, count),
++ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(off_t *, offset) __field(size_t, count)),
++ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getxattr
++SC_TRACE_EVENT(sys_getxattr,
++ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
++ TP_ARGS(pathname, name, value, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lgetxattr
++SC_TRACE_EVENT(sys_lgetxattr,
++ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
++ TP_ARGS(pathname, name, value, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fgetxattr
++SC_TRACE_EVENT(sys_fgetxattr,
++ TP_PROTO(int fd, const char * name, void * value, size_t size),
++ TP_ARGS(fd, name, value, size),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendfile64
++SC_TRACE_EVENT(sys_sendfile64,
++ TP_PROTO(int out_fd, int in_fd, loff_t * offset, size_t count),
++ TP_ARGS(out_fd, in_fd, offset, count),
++ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(loff_t *, offset) __field(size_t, count)),
++ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_ctl
++SC_TRACE_EVENT(sys_epoll_ctl,
++ TP_PROTO(int epfd, int op, int fd, struct epoll_event * event),
++ TP_ARGS(epfd, op, fd, event),
++ TP_STRUCT__entry(__field(int, epfd) __field(int, op) __field(int, fd) __field_hex(struct epoll_event *, event)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(op, op) tp_assign(fd, fd) tp_assign(event, event)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_wait
++SC_TRACE_EVENT(sys_epoll_wait,
++ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout),
++ TP_ARGS(epfd, events, maxevents, timeout),
++ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_settime
++SC_TRACE_EVENT(sys_timer_settime,
++ TP_PROTO(timer_t timer_id, int flags, const struct itimerspec * new_setting, struct itimerspec * old_setting),
++ TP_ARGS(timer_id, flags, new_setting, old_setting),
++ TP_STRUCT__entry(__field(timer_t, timer_id) __field(int, flags) __field_hex(const struct itimerspec *, new_setting) __field_hex(struct itimerspec *, old_setting)),
++ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(flags, flags) tp_assign(new_setting, new_setting) tp_assign(old_setting, old_setting)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_nanosleep
++SC_TRACE_EVENT(sys_clock_nanosleep,
++ TP_PROTO(const clockid_t which_clock, int flags, const struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(which_clock, flags, rqtp, rmtp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field(int, flags) __field_hex(const struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(flags, flags) tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_open
++SC_TRACE_EVENT(sys_mq_open,
++ TP_PROTO(const char * u_name, int oflag, mode_t mode, struct mq_attr * u_attr),
++ TP_ARGS(u_name, oflag, mode, u_attr),
++ TP_STRUCT__entry(__string_from_user(u_name, u_name) __field(int, oflag) __field(mode_t, mode) __field_hex(struct mq_attr *, u_attr)),
++ TP_fast_assign(tp_copy_string_from_user(u_name, u_name) tp_assign(oflag, oflag) tp_assign(mode, mode) tp_assign(u_attr, u_attr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_request_key
++SC_TRACE_EVENT(sys_request_key,
++ TP_PROTO(const char * _type, const char * _description, const char * _callout_info, key_serial_t destringid),
++ TP_ARGS(_type, _description, _callout_info, destringid),
++ TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const char *, _callout_info) __field(key_serial_t, destringid)),
++ TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_callout_info, _callout_info) tp_assign(destringid, destringid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_vmsplice
++SC_TRACE_EVENT(sys_vmsplice,
++ TP_PROTO(int fd, const struct iovec * iov, unsigned long nr_segs, unsigned int flags),
++ TP_ARGS(fd, iov, nr_segs, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(const struct iovec *, iov) __field(unsigned long, nr_segs) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(iov, iov) tp_assign(nr_segs, nr_segs) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_openat
++SC_TRACE_EVENT(sys_openat,
++ TP_PROTO(int dfd, const char * filename, int flags, int mode),
++ TP_ARGS(dfd, filename, flags, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mknodat
++SC_TRACE_EVENT(sys_mknodat,
++ TP_PROTO(int dfd, const char * filename, int mode, unsigned dev),
++ TP_ARGS(dfd, filename, mode, dev),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstatat64
++SC_TRACE_EVENT(sys_fstatat64,
++ TP_PROTO(int dfd, const char * filename, struct stat64 * statbuf, int flag),
++ TP_ARGS(dfd, filename, statbuf, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_renameat
++SC_TRACE_EVENT(sys_renameat,
++ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname),
++ TP_ARGS(olddfd, oldname, newdfd, newname),
++ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readlinkat
++SC_TRACE_EVENT(sys_readlinkat,
++ TP_PROTO(int dfd, const char * pathname, char * buf, int bufsiz),
++ TP_ARGS(dfd, pathname, buf, bufsiz),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_utimensat
++SC_TRACE_EVENT(sys_utimensat,
++ TP_PROTO(int dfd, const char * filename, struct timespec * utimes, int flags),
++ TP_ARGS(dfd, filename, utimes, flags),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timespec *, utimes) __field(int, flags)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timerfd_settime
++SC_TRACE_EVENT(sys_timerfd_settime,
++ TP_PROTO(int ufd, int flags, const struct itimerspec * utmr, struct itimerspec * otmr),
++ TP_ARGS(ufd, flags, utmr, otmr),
++ TP_STRUCT__entry(__field(int, ufd) __field(int, flags) __field_hex(const struct itimerspec *, utmr) __field_hex(struct itimerspec *, otmr)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(flags, flags) tp_assign(utmr, utmr) tp_assign(otmr, otmr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_signalfd4
++SC_TRACE_EVENT(sys_signalfd4,
++ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask, int flags),
++ TP_ARGS(ufd, user_mask, sizemask, flags),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask) __field(int, flags)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_tgsigqueueinfo
++SC_TRACE_EVENT(sys_rt_tgsigqueueinfo,
++ TP_PROTO(pid_t tgid, pid_t pid, int sig, siginfo_t * uinfo),
++ TP_ARGS(tgid, pid, sig, uinfo),
++ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_prlimit64
++SC_TRACE_EVENT(sys_prlimit64,
++ TP_PROTO(pid_t pid, unsigned int resource, const struct rlimit64 * new_rlim, struct rlimit64 * old_rlim),
++ TP_ARGS(pid, resource, new_rlim, old_rlim),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, resource) __field_hex(const struct rlimit64 *, new_rlim) __field_hex(struct rlimit64 *, old_rlim)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(resource, resource) tp_assign(new_rlim, new_rlim) tp_assign(old_rlim, old_rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_socketpair
++SC_TRACE_EVENT(sys_socketpair,
++ TP_PROTO(int family, int type, int protocol, int * usockvec),
++ TP_ARGS(family, type, protocol, usockvec),
++ TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol) __field_hex(int *, usockvec)),
++ TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol) tp_assign(usockvec, usockvec)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_send
++SC_TRACE_EVENT(sys_send,
++ TP_PROTO(int fd, void * buff, size_t len, unsigned flags),
++ TP_ARGS(fd, buff, len, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, buff) __field(size_t, len) __field(unsigned, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buff, buff) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_accept4
++SC_TRACE_EVENT(sys_accept4,
++ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen, int flags),
++ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen) __field(int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendmmsg
++SC_TRACE_EVENT(sys_sendmmsg,
++ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags),
++ TP_ARGS(fd, mmsg, vlen, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mount
++SC_TRACE_EVENT(sys_mount,
++ TP_PROTO(char * dev_name, char * dir_name, char * type, unsigned long flags, void * data),
++ TP_ARGS(dev_name, dir_name, type, flags, data),
++ TP_STRUCT__entry(__string_from_user(dev_name, dev_name) __string_from_user(dir_name, dir_name) __string_from_user(type, type) __field(unsigned long, flags) __field_hex(void *, data)),
++ TP_fast_assign(tp_copy_string_from_user(dev_name, dev_name) tp_copy_string_from_user(dir_name, dir_name) tp_copy_string_from_user(type, type) tp_assign(flags, flags) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_llseek
++SC_TRACE_EVENT(sys_llseek,
++ TP_PROTO(unsigned int fd, unsigned long offset_high, unsigned long offset_low, loff_t * result, unsigned int origin),
++ TP_ARGS(fd, offset_high, offset_low, result, origin),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, offset_high) __field(unsigned long, offset_low) __field_hex(loff_t *, result) __field(unsigned int, origin)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset_high, offset_high) tp_assign(offset_low, offset_low) tp_assign(result, result) tp_assign(origin, origin)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_select
++SC_TRACE_EVENT(sys_select,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timeval * tvp),
++ TP_ARGS(n, inp, outp, exp, tvp),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timeval *, tvp)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tvp, tvp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pciconfig_read
++SC_TRACE_EVENT(sys_pciconfig_read,
++ TP_PROTO(unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len, void * buf),
++ TP_ARGS(bus, dfn, off, len, buf),
++ TP_STRUCT__entry(__field(unsigned long, bus) __field(unsigned long, dfn) __field(unsigned long, off) __field(unsigned long, len) __field_hex(void *, buf)),
++ TP_fast_assign(tp_assign(bus, bus) tp_assign(dfn, dfn) tp_assign(off, off) tp_assign(len, len) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pciconfig_write
++SC_TRACE_EVENT(sys_pciconfig_write,
++ TP_PROTO(unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len, void * buf),
++ TP_ARGS(bus, dfn, off, len, buf),
++ TP_STRUCT__entry(__field(unsigned long, bus) __field(unsigned long, dfn) __field(unsigned long, off) __field(unsigned long, len) __field_hex(void *, buf)),
++ TP_fast_assign(tp_assign(bus, bus) tp_assign(dfn, dfn) tp_assign(off, off) tp_assign(len, len) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setxattr
++SC_TRACE_EVENT(sys_setxattr,
++ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(pathname, name, value, size, flags),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lsetxattr
++SC_TRACE_EVENT(sys_lsetxattr,
++ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(pathname, name, value, size, flags),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fsetxattr
++SC_TRACE_EVENT(sys_fsetxattr,
++ TP_PROTO(int fd, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(fd, name, value, size, flags),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_getevents
++SC_TRACE_EVENT(sys_io_getevents,
++ TP_PROTO(aio_context_t ctx_id, long min_nr, long nr, struct io_event * events, struct timespec * timeout),
++ TP_ARGS(ctx_id, min_nr, nr, events, timeout),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, min_nr) __field(long, nr) __field_hex(struct io_event *, events) __field_hex(struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(min_nr, min_nr) tp_assign(nr, nr) tp_assign(events, events) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_timedsend
++SC_TRACE_EVENT(sys_mq_timedsend,
++ TP_PROTO(mqd_t mqdes, const char * u_msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec * u_abs_timeout),
++ TP_ARGS(mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const char *, u_msg_ptr) __field(size_t, msg_len) __field(unsigned int, msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(msg_prio, msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_timedreceive
++SC_TRACE_EVENT(sys_mq_timedreceive,
++ TP_PROTO(mqd_t mqdes, char * u_msg_ptr, size_t msg_len, unsigned int * u_msg_prio, const struct timespec * u_abs_timeout),
++ TP_ARGS(mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(char *, u_msg_ptr) __field(size_t, msg_len) __field_hex(unsigned int *, u_msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(u_msg_prio, u_msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_add_key
++SC_TRACE_EVENT(sys_add_key,
++ TP_PROTO(const char * _type, const char * _description, const void * _payload, size_t plen, key_serial_t ringid),
++ TP_ARGS(_type, _description, _payload, plen, ringid),
++ TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const void *, _payload) __field(size_t, plen) __field(key_serial_t, ringid)),
++ TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_payload, _payload) tp_assign(plen, plen) tp_assign(ringid, ringid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_waitid
++SC_TRACE_EVENT(sys_waitid,
++ TP_PROTO(int which, pid_t upid, struct siginfo * infop, int options, struct rusage * ru),
++ TP_ARGS(which, upid, infop, options, ru),
++ TP_STRUCT__entry(__field(int, which) __field(pid_t, upid) __field_hex(struct siginfo *, infop) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(upid, upid) tp_assign(infop, infop) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ppoll
++SC_TRACE_EVENT(sys_ppoll,
++ TP_PROTO(struct pollfd * ufds, unsigned int nfds, struct timespec * tsp, const sigset_t * sigmask, size_t sigsetsize),
++ TP_ARGS(ufds, nfds, tsp, sigmask, sigsetsize),
++ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field_hex(struct timespec *, tsp) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(tsp, tsp) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchownat
++SC_TRACE_EVENT(sys_fchownat,
++ TP_PROTO(int dfd, const char * filename, uid_t user, gid_t group, int flag),
++ TP_ARGS(dfd, filename, user, group, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_linkat
++SC_TRACE_EVENT(sys_linkat,
++ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname, int flags),
++ TP_ARGS(olddfd, oldname, newdfd, newname, flags),
++ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname) __field(int, flags)),
++ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_perf_event_open
++SC_TRACE_EVENT(sys_perf_event_open,
++ TP_PROTO(struct perf_event_attr * attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags),
++ TP_ARGS(attr_uptr, pid, cpu, group_fd, flags),
++ TP_STRUCT__entry(__field_hex(struct perf_event_attr *, attr_uptr) __field(pid_t, pid) __field(int, cpu) __field(int, group_fd) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(attr_uptr, attr_uptr) tp_assign(pid, pid) tp_assign(cpu, cpu) tp_assign(group_fd, group_fd) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_preadv
++SC_TRACE_EVENT(sys_preadv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
++ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pwritev
++SC_TRACE_EVENT(sys_pwritev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
++ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setsockopt
++SC_TRACE_EVENT(sys_setsockopt,
++ TP_PROTO(int fd, int level, int optname, char * optval, int optlen),
++ TP_ARGS(fd, level, optname, optval, optlen),
++ TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field(int, optlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getsockopt
++SC_TRACE_EVENT(sys_getsockopt,
++ TP_PROTO(int fd, int level, int optname, char * optval, int * optlen),
++ TP_ARGS(fd, level, optname, optval, optlen),
++ TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field_hex(int *, optlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_recvmmsg
++SC_TRACE_EVENT(sys_recvmmsg,
++ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags, struct timespec * timeout),
++ TP_ARGS(fd, mmsg, vlen, flags, timeout),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags) __field_hex(struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ipc
++SC_TRACE_EVENT(sys_ipc,
++ TP_PROTO(unsigned int call, int first, unsigned long second, unsigned long third, void * ptr, long fifth),
++ TP_ARGS(call, first, second, third, ptr, fifth),
++ TP_STRUCT__entry(__field(unsigned int, call) __field(int, first) __field(unsigned long, second) __field(unsigned long, third) __field_hex(void *, ptr) __field(long, fifth)),
++ TP_fast_assign(tp_assign(call, call) tp_assign(first, first) tp_assign(second, second) tp_assign(third, third) tp_assign(ptr, ptr) tp_assign(fifth, fifth)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_futex
++SC_TRACE_EVENT(sys_futex,
++ TP_PROTO(u32 * uaddr, int op, u32 val, struct timespec * utime, u32 * uaddr2, u32 val3),
++ TP_ARGS(uaddr, op, val, utime, uaddr2, val3),
++ TP_STRUCT__entry(__field_hex(u32 *, uaddr) __field(int, op) __field(u32, val) __field_hex(struct timespec *, utime) __field_hex(u32 *, uaddr2) __field(u32, val3)),
++ TP_fast_assign(tp_assign(uaddr, uaddr) tp_assign(op, op) tp_assign(val, val) tp_assign(utime, utime) tp_assign(uaddr2, uaddr2) tp_assign(val3, val3)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pselect6
++SC_TRACE_EVENT(sys_pselect6,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timespec * tsp, void * sig),
++ TP_ARGS(n, inp, outp, exp, tsp, sig),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timespec *, tsp) __field_hex(void *, sig)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tsp, tsp) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_splice
++SC_TRACE_EVENT(sys_splice,
++ TP_PROTO(int fd_in, loff_t * off_in, int fd_out, loff_t * off_out, size_t len, unsigned int flags),
++ TP_ARGS(fd_in, off_in, fd_out, off_out, len, flags),
++ TP_STRUCT__entry(__field(int, fd_in) __field_hex(loff_t *, off_in) __field(int, fd_out) __field_hex(loff_t *, off_out) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd_in, fd_in) tp_assign(off_in, off_in) tp_assign(fd_out, fd_out) tp_assign(off_out, off_out) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_pwait
++SC_TRACE_EVENT(sys_epoll_pwait,
++ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout, const sigset_t * sigmask, size_t sigsetsize),
++ TP_ARGS(epfd, events, maxevents, timeout, sigmask, sigsetsize),
++ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendto
++SC_TRACE_EVENT(sys_sendto,
++ TP_PROTO(int fd, void * buff, size_t len, unsigned flags, struct sockaddr * addr, int addr_len),
++ TP_ARGS(fd, buff, len, flags, addr, addr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, buff) __field(size_t, len) __field(unsigned, flags) __field_hex(struct sockaddr *, addr) __field_hex(int, addr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buff, buff) tp_assign(len, len) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_recvfrom
++SC_TRACE_EVENT(sys_recvfrom,
++ TP_PROTO(int fd, void * ubuf, size_t size, unsigned flags, struct sockaddr * addr, int * addr_len),
++ TP_ARGS(fd, ubuf, size, flags, addr, addr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, ubuf) __field(size_t, size) __field(unsigned, flags) __field_hex(struct sockaddr *, addr) __field_hex(int *, addr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(ubuf, ubuf) tp_assign(size, size) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_POINTERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "powerpc-32-syscalls-3.0.34_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_TABLE_32_sys_read
++TRACE_SYSCALL_TABLE(sys_read, sys_read, 3, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_write
++TRACE_SYSCALL_TABLE(sys_write, sys_write, 4, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_open
++TRACE_SYSCALL_TABLE(sys_open, sys_open, 5, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_waitpid
++TRACE_SYSCALL_TABLE(sys_waitpid, sys_waitpid, 7, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_creat
++TRACE_SYSCALL_TABLE(sys_creat, sys_creat, 8, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_link
++TRACE_SYSCALL_TABLE(sys_link, sys_link, 9, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_unlink
++TRACE_SYSCALL_TABLE(sys_unlink, sys_unlink, 10, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chdir
++TRACE_SYSCALL_TABLE(sys_chdir, sys_chdir, 12, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_time
++TRACE_SYSCALL_TABLE(sys_time, sys_time, 13, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mknod
++TRACE_SYSCALL_TABLE(sys_mknod, sys_mknod, 14, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chmod
++TRACE_SYSCALL_TABLE(sys_chmod, sys_chmod, 15, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lchown
++TRACE_SYSCALL_TABLE(sys_lchown, sys_lchown, 16, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_stat
++TRACE_SYSCALL_TABLE(sys_stat, sys_stat, 18, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mount
++TRACE_SYSCALL_TABLE(sys_mount, sys_mount, 21, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_oldumount
++TRACE_SYSCALL_TABLE(sys_oldumount, sys_oldumount, 22, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_stime
++TRACE_SYSCALL_TABLE(sys_stime, sys_stime, 25, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstat
++TRACE_SYSCALL_TABLE(sys_fstat, sys_fstat, 28, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_utime
++TRACE_SYSCALL_TABLE(sys_utime, sys_utime, 30, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_access
++TRACE_SYSCALL_TABLE(sys_access, sys_access, 33, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rename
++TRACE_SYSCALL_TABLE(sys_rename, sys_rename, 38, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mkdir
++TRACE_SYSCALL_TABLE(sys_mkdir, sys_mkdir, 39, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rmdir
++TRACE_SYSCALL_TABLE(sys_rmdir, sys_rmdir, 40, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pipe
++TRACE_SYSCALL_TABLE(sys_pipe, sys_pipe, 42, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_times
++TRACE_SYSCALL_TABLE(sys_times, sys_times, 43, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_acct
++TRACE_SYSCALL_TABLE(sys_acct, sys_acct, 51, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_umount
++TRACE_SYSCALL_TABLE(sys_umount, sys_umount, 52, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_olduname
++TRACE_SYSCALL_TABLE(sys_olduname, sys_olduname, 59, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chroot
++TRACE_SYSCALL_TABLE(sys_chroot, sys_chroot, 61, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ustat
++TRACE_SYSCALL_TABLE(sys_ustat, sys_ustat, 62, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sigpending
++TRACE_SYSCALL_TABLE(sys_sigpending, sys_sigpending, 73, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sethostname
++TRACE_SYSCALL_TABLE(sys_sethostname, sys_sethostname, 74, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setrlimit
++TRACE_SYSCALL_TABLE(sys_setrlimit, sys_setrlimit, 75, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_old_getrlimit
++TRACE_SYSCALL_TABLE(sys_old_getrlimit, sys_old_getrlimit, 76, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getrusage
++TRACE_SYSCALL_TABLE(sys_getrusage, sys_getrusage, 77, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_gettimeofday
++TRACE_SYSCALL_TABLE(sys_gettimeofday, sys_gettimeofday, 78, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_settimeofday
++TRACE_SYSCALL_TABLE(sys_settimeofday, sys_settimeofday, 79, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgroups
++TRACE_SYSCALL_TABLE(sys_getgroups, sys_getgroups, 80, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgroups
++TRACE_SYSCALL_TABLE(sys_setgroups, sys_setgroups, 81, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_symlink
++TRACE_SYSCALL_TABLE(sys_symlink, sys_symlink, 83, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lstat
++TRACE_SYSCALL_TABLE(sys_lstat, sys_lstat, 84, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readlink
++TRACE_SYSCALL_TABLE(sys_readlink, sys_readlink, 85, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_uselib
++TRACE_SYSCALL_TABLE(sys_uselib, sys_uselib, 86, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_swapon
++TRACE_SYSCALL_TABLE(sys_swapon, sys_swapon, 87, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_reboot
++TRACE_SYSCALL_TABLE(sys_reboot, sys_reboot, 88, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_old_readdir
++TRACE_SYSCALL_TABLE(sys_old_readdir, sys_old_readdir, 89, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_truncate
++TRACE_SYSCALL_TABLE(sys_truncate, sys_truncate, 92, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_statfs
++TRACE_SYSCALL_TABLE(sys_statfs, sys_statfs, 99, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstatfs
++TRACE_SYSCALL_TABLE(sys_fstatfs, sys_fstatfs, 100, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_socketcall
++TRACE_SYSCALL_TABLE(sys_socketcall, sys_socketcall, 102, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_syslog
++TRACE_SYSCALL_TABLE(sys_syslog, sys_syslog, 103, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setitimer
++TRACE_SYSCALL_TABLE(sys_setitimer, sys_setitimer, 104, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getitimer
++TRACE_SYSCALL_TABLE(sys_getitimer, sys_getitimer, 105, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newstat
++TRACE_SYSCALL_TABLE(sys_newstat, sys_newstat, 106, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newlstat
++TRACE_SYSCALL_TABLE(sys_newlstat, sys_newlstat, 107, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newfstat
++TRACE_SYSCALL_TABLE(sys_newfstat, sys_newfstat, 108, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_uname
++TRACE_SYSCALL_TABLE(sys_uname, sys_uname, 109, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_wait4
++TRACE_SYSCALL_TABLE(sys_wait4, sys_wait4, 114, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_swapoff
++TRACE_SYSCALL_TABLE(sys_swapoff, sys_swapoff, 115, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysinfo
++TRACE_SYSCALL_TABLE(sys_sysinfo, sys_sysinfo, 116, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ipc
++TRACE_SYSCALL_TABLE(sys_ipc, sys_ipc, 117, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setdomainname
++TRACE_SYSCALL_TABLE(sys_setdomainname, sys_setdomainname, 121, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newuname
++TRACE_SYSCALL_TABLE(sys_newuname, sys_newuname, 122, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_adjtimex
++TRACE_SYSCALL_TABLE(sys_adjtimex, sys_adjtimex, 124, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sigprocmask
++TRACE_SYSCALL_TABLE(sys_sigprocmask, sys_sigprocmask, 126, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_init_module
++TRACE_SYSCALL_TABLE(sys_init_module, sys_init_module, 128, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_delete_module
++TRACE_SYSCALL_TABLE(sys_delete_module, sys_delete_module, 129, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_llseek
++TRACE_SYSCALL_TABLE(sys_llseek, sys_llseek, 140, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getdents
++TRACE_SYSCALL_TABLE(sys_getdents, sys_getdents, 141, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_select
++TRACE_SYSCALL_TABLE(sys_select, sys_select, 142, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readv
++TRACE_SYSCALL_TABLE(sys_readv, sys_readv, 145, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_writev
++TRACE_SYSCALL_TABLE(sys_writev, sys_writev, 146, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysctl
++TRACE_SYSCALL_TABLE(sys_sysctl, sys_sysctl, 149, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setparam
++TRACE_SYSCALL_TABLE(sys_sched_setparam, sys_sched_setparam, 154, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getparam
++TRACE_SYSCALL_TABLE(sys_sched_getparam, sys_sched_getparam, 155, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setscheduler
++TRACE_SYSCALL_TABLE(sys_sched_setscheduler, sys_sched_setscheduler, 156, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_rr_get_interval
++TRACE_SYSCALL_TABLE(sys_sched_rr_get_interval, sys_sched_rr_get_interval, 161, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_nanosleep
++TRACE_SYSCALL_TABLE(sys_nanosleep, sys_nanosleep, 162, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getresuid
++TRACE_SYSCALL_TABLE(sys_getresuid, sys_getresuid, 165, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_poll
++TRACE_SYSCALL_TABLE(sys_poll, sys_poll, 167, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_nfsservctl
++TRACE_SYSCALL_TABLE(sys_nfsservctl, sys_nfsservctl, 168, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getresgid
++TRACE_SYSCALL_TABLE(sys_getresgid, sys_getresgid, 170, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigaction
++TRACE_SYSCALL_TABLE(sys_rt_sigaction, sys_rt_sigaction, 173, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigprocmask
++TRACE_SYSCALL_TABLE(sys_rt_sigprocmask, sys_rt_sigprocmask, 174, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigpending
++TRACE_SYSCALL_TABLE(sys_rt_sigpending, sys_rt_sigpending, 175, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigtimedwait
++TRACE_SYSCALL_TABLE(sys_rt_sigtimedwait, sys_rt_sigtimedwait, 176, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigqueueinfo
++TRACE_SYSCALL_TABLE(sys_rt_sigqueueinfo, sys_rt_sigqueueinfo, 177, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigsuspend
++TRACE_SYSCALL_TABLE(sys_rt_sigsuspend, sys_rt_sigsuspend, 178, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chown
++TRACE_SYSCALL_TABLE(sys_chown, sys_chown, 181, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getcwd
++TRACE_SYSCALL_TABLE(sys_getcwd, sys_getcwd, 182, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendfile
++TRACE_SYSCALL_TABLE(sys_sendfile, sys_sendfile, 186, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getrlimit
++TRACE_SYSCALL_TABLE(sys_getrlimit, sys_getrlimit, 190, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_stat64
++TRACE_SYSCALL_TABLE(sys_stat64, sys_stat64, 195, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lstat64
++TRACE_SYSCALL_TABLE(sys_lstat64, sys_lstat64, 196, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstat64
++TRACE_SYSCALL_TABLE(sys_fstat64, sys_fstat64, 197, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pciconfig_read
++TRACE_SYSCALL_TABLE(sys_pciconfig_read, sys_pciconfig_read, 198, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pciconfig_write
++TRACE_SYSCALL_TABLE(sys_pciconfig_write, sys_pciconfig_write, 199, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getdents64
++TRACE_SYSCALL_TABLE(sys_getdents64, sys_getdents64, 202, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pivot_root
++TRACE_SYSCALL_TABLE(sys_pivot_root, sys_pivot_root, 203, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mincore
++TRACE_SYSCALL_TABLE(sys_mincore, sys_mincore, 206, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setxattr
++TRACE_SYSCALL_TABLE(sys_setxattr, sys_setxattr, 209, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lsetxattr
++TRACE_SYSCALL_TABLE(sys_lsetxattr, sys_lsetxattr, 210, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fsetxattr
++TRACE_SYSCALL_TABLE(sys_fsetxattr, sys_fsetxattr, 211, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getxattr
++TRACE_SYSCALL_TABLE(sys_getxattr, sys_getxattr, 212, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lgetxattr
++TRACE_SYSCALL_TABLE(sys_lgetxattr, sys_lgetxattr, 213, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fgetxattr
++TRACE_SYSCALL_TABLE(sys_fgetxattr, sys_fgetxattr, 214, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_listxattr
++TRACE_SYSCALL_TABLE(sys_listxattr, sys_listxattr, 215, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_llistxattr
++TRACE_SYSCALL_TABLE(sys_llistxattr, sys_llistxattr, 216, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_flistxattr
++TRACE_SYSCALL_TABLE(sys_flistxattr, sys_flistxattr, 217, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_removexattr
++TRACE_SYSCALL_TABLE(sys_removexattr, sys_removexattr, 218, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lremovexattr
++TRACE_SYSCALL_TABLE(sys_lremovexattr, sys_lremovexattr, 219, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fremovexattr
++TRACE_SYSCALL_TABLE(sys_fremovexattr, sys_fremovexattr, 220, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_futex
++TRACE_SYSCALL_TABLE(sys_futex, sys_futex, 221, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setaffinity
++TRACE_SYSCALL_TABLE(sys_sched_setaffinity, sys_sched_setaffinity, 222, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getaffinity
++TRACE_SYSCALL_TABLE(sys_sched_getaffinity, sys_sched_getaffinity, 223, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendfile64
++TRACE_SYSCALL_TABLE(sys_sendfile64, sys_sendfile64, 226, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_setup
++TRACE_SYSCALL_TABLE(sys_io_setup, sys_io_setup, 227, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_getevents
++TRACE_SYSCALL_TABLE(sys_io_getevents, sys_io_getevents, 229, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_submit
++TRACE_SYSCALL_TABLE(sys_io_submit, sys_io_submit, 230, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_cancel
++TRACE_SYSCALL_TABLE(sys_io_cancel, sys_io_cancel, 231, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_set_tid_address
++TRACE_SYSCALL_TABLE(sys_set_tid_address, sys_set_tid_address, 232, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_ctl
++TRACE_SYSCALL_TABLE(sys_epoll_ctl, sys_epoll_ctl, 237, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_wait
++TRACE_SYSCALL_TABLE(sys_epoll_wait, sys_epoll_wait, 238, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_create
++TRACE_SYSCALL_TABLE(sys_timer_create, sys_timer_create, 240, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_settime
++TRACE_SYSCALL_TABLE(sys_timer_settime, sys_timer_settime, 241, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_gettime
++TRACE_SYSCALL_TABLE(sys_timer_gettime, sys_timer_gettime, 242, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_settime
++TRACE_SYSCALL_TABLE(sys_clock_settime, sys_clock_settime, 245, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_gettime
++TRACE_SYSCALL_TABLE(sys_clock_gettime, sys_clock_gettime, 246, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_getres
++TRACE_SYSCALL_TABLE(sys_clock_getres, sys_clock_getres, 247, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_nanosleep
++TRACE_SYSCALL_TABLE(sys_clock_nanosleep, sys_clock_nanosleep, 248, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_utimes
++TRACE_SYSCALL_TABLE(sys_utimes, sys_utimes, 251, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_statfs64
++TRACE_SYSCALL_TABLE(sys_statfs64, sys_statfs64, 252, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstatfs64
++TRACE_SYSCALL_TABLE(sys_fstatfs64, sys_fstatfs64, 253, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_open
++TRACE_SYSCALL_TABLE(sys_mq_open, sys_mq_open, 262, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_unlink
++TRACE_SYSCALL_TABLE(sys_mq_unlink, sys_mq_unlink, 263, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_timedsend
++TRACE_SYSCALL_TABLE(sys_mq_timedsend, sys_mq_timedsend, 264, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_timedreceive
++TRACE_SYSCALL_TABLE(sys_mq_timedreceive, sys_mq_timedreceive, 265, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_notify
++TRACE_SYSCALL_TABLE(sys_mq_notify, sys_mq_notify, 266, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_getsetattr
++TRACE_SYSCALL_TABLE(sys_mq_getsetattr, sys_mq_getsetattr, 267, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_add_key
++TRACE_SYSCALL_TABLE(sys_add_key, sys_add_key, 269, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_request_key
++TRACE_SYSCALL_TABLE(sys_request_key, sys_request_key, 270, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_waitid
++TRACE_SYSCALL_TABLE(sys_waitid, sys_waitid, 272, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_add_watch
++TRACE_SYSCALL_TABLE(sys_inotify_add_watch, sys_inotify_add_watch, 276, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pselect6
++TRACE_SYSCALL_TABLE(sys_pselect6, sys_pselect6, 280, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ppoll
++TRACE_SYSCALL_TABLE(sys_ppoll, sys_ppoll, 281, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_splice
++TRACE_SYSCALL_TABLE(sys_splice, sys_splice, 283, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_vmsplice
++TRACE_SYSCALL_TABLE(sys_vmsplice, sys_vmsplice, 285, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_openat
++TRACE_SYSCALL_TABLE(sys_openat, sys_openat, 286, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mkdirat
++TRACE_SYSCALL_TABLE(sys_mkdirat, sys_mkdirat, 287, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mknodat
++TRACE_SYSCALL_TABLE(sys_mknodat, sys_mknodat, 288, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchownat
++TRACE_SYSCALL_TABLE(sys_fchownat, sys_fchownat, 289, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_futimesat
++TRACE_SYSCALL_TABLE(sys_futimesat, sys_futimesat, 290, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstatat64
++TRACE_SYSCALL_TABLE(sys_fstatat64, sys_fstatat64, 291, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_unlinkat
++TRACE_SYSCALL_TABLE(sys_unlinkat, sys_unlinkat, 292, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_renameat
++TRACE_SYSCALL_TABLE(sys_renameat, sys_renameat, 293, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_linkat
++TRACE_SYSCALL_TABLE(sys_linkat, sys_linkat, 294, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_symlinkat
++TRACE_SYSCALL_TABLE(sys_symlinkat, sys_symlinkat, 295, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readlinkat
++TRACE_SYSCALL_TABLE(sys_readlinkat, sys_readlinkat, 296, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchmodat
++TRACE_SYSCALL_TABLE(sys_fchmodat, sys_fchmodat, 297, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_faccessat
++TRACE_SYSCALL_TABLE(sys_faccessat, sys_faccessat, 298, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_get_robust_list
++TRACE_SYSCALL_TABLE(sys_get_robust_list, sys_get_robust_list, 299, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_set_robust_list
++TRACE_SYSCALL_TABLE(sys_set_robust_list, sys_set_robust_list, 300, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getcpu
++TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 302, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_pwait
++TRACE_SYSCALL_TABLE(sys_epoll_pwait, sys_epoll_pwait, 303, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_utimensat
++TRACE_SYSCALL_TABLE(sys_utimensat, sys_utimensat, 304, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_signalfd
++TRACE_SYSCALL_TABLE(sys_signalfd, sys_signalfd, 305, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timerfd_settime
++TRACE_SYSCALL_TABLE(sys_timerfd_settime, sys_timerfd_settime, 311, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timerfd_gettime
++TRACE_SYSCALL_TABLE(sys_timerfd_gettime, sys_timerfd_gettime, 312, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_signalfd4
++TRACE_SYSCALL_TABLE(sys_signalfd4, sys_signalfd4, 313, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pipe2
++TRACE_SYSCALL_TABLE(sys_pipe2, sys_pipe2, 317, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_perf_event_open
++TRACE_SYSCALL_TABLE(sys_perf_event_open, sys_perf_event_open, 319, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_preadv
++TRACE_SYSCALL_TABLE(sys_preadv, sys_preadv, 320, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pwritev
++TRACE_SYSCALL_TABLE(sys_pwritev, sys_pwritev, 321, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_tgsigqueueinfo
++TRACE_SYSCALL_TABLE(sys_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, 322, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_prlimit64
++TRACE_SYSCALL_TABLE(sys_prlimit64, sys_prlimit64, 325, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_bind
++TRACE_SYSCALL_TABLE(sys_bind, sys_bind, 327, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_connect
++TRACE_SYSCALL_TABLE(sys_connect, sys_connect, 328, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_accept
++TRACE_SYSCALL_TABLE(sys_accept, sys_accept, 330, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getsockname
++TRACE_SYSCALL_TABLE(sys_getsockname, sys_getsockname, 331, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpeername
++TRACE_SYSCALL_TABLE(sys_getpeername, sys_getpeername, 332, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_socketpair
++TRACE_SYSCALL_TABLE(sys_socketpair, sys_socketpair, 333, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_send
++TRACE_SYSCALL_TABLE(sys_send, sys_send, 334, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendto
++TRACE_SYSCALL_TABLE(sys_sendto, sys_sendto, 335, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_recvfrom
++TRACE_SYSCALL_TABLE(sys_recvfrom, sys_recvfrom, 337, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setsockopt
++TRACE_SYSCALL_TABLE(sys_setsockopt, sys_setsockopt, 339, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getsockopt
++TRACE_SYSCALL_TABLE(sys_getsockopt, sys_getsockopt, 340, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendmsg
++TRACE_SYSCALL_TABLE(sys_sendmsg, sys_sendmsg, 341, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_recvmsg
++TRACE_SYSCALL_TABLE(sys_recvmsg, sys_recvmsg, 342, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_recvmmsg
++TRACE_SYSCALL_TABLE(sys_recvmmsg, sys_recvmmsg, 343, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_accept4
++TRACE_SYSCALL_TABLE(sys_accept4, sys_accept4, 344, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_adjtime
++TRACE_SYSCALL_TABLE(sys_clock_adjtime, sys_clock_adjtime, 347, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendmmsg
++TRACE_SYSCALL_TABLE(sys_sendmmsg, sys_sendmmsg, 349, 4)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/powerpc-32-syscalls-3.0.34_pointers_override.h
+@@ -0,0 +1,36 @@
++#define OVERRIDE_TABLE_32_sys_mmap2
++
++#ifndef CREATE_SYSCALL_TABLE
++
++SC_TRACE_EVENT(sys_mmap2,
++ TP_PROTO(void *addr, size_t len, int prot,
++ int flags, int fd, off_t pgoff),
++ TP_ARGS(addr, len, prot, flags, fd, pgoff),
++ TP_STRUCT__entry(
++ __field_hex(void *, addr)
++ __field(size_t, len)
++ __field(int, prot)
++ __field(int, flags)
++ __field(int, fd)
++ __field(off_t, pgoff)),
++ TP_fast_assign(
++ tp_assign(addr, addr)
++ tp_assign(len, len)
++ tp_assign(prot, prot)
++ tp_assign(flags, flags)
++ tp_assign(fd, fd)
++ tp_assign(pgoff, pgoff)),
++ TP_printk()
++)
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#define OVERRIDE_TABLE_32_sys_execve
++TRACE_SYSCALL_TABLE(sys_execve, sys_execve, 11, 3)
++#define OVERRIDE_TABLE_32_sys_clone
++TRACE_SYSCALL_TABLE(sys_clone, sys_clone, 120, 5)
++#define OVERRIDE_TABLE_32_sys_mmap2
++TRACE_SYSCALL_TABLE(sys_mmap2, sys_mmap2, 192, 6)
++
++#endif /* CREATE_SYSCALL_TABLE */
++
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers.h
+@@ -0,0 +1,15 @@
++#ifdef CONFIG_X86_64
++#include "x86-64-syscalls-3.10.0-rc7_integers.h"
++#endif
++
++#ifdef CONFIG_X86_32
++#include "x86-32-syscalls-3.1.0-rc6_integers.h"
++#endif
++
++#ifdef CONFIG_ARM
++#include "arm-32-syscalls-3.4.25_integers.h"
++#endif
++
++#ifdef CONFIG_PPC
++#include "powerpc-32-syscalls-3.0.34_integers.h"
++#endif
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_integers_override.h
+@@ -0,0 +1,14 @@
++#define OVERRIDE_32_sys_mmap
++#define OVERRIDE_64_sys_mmap
++
++#ifndef CREATE_SYSCALL_TABLE
++
++SC_TRACE_EVENT(sys_mmap,
++ TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off),
++ TP_ARGS(addr, len, prot, flags, fd, off),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len) __field(int, prot) __field(int, flags) __field(int, fd) __field(off_t, offset)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(offset, off)),
++ TP_printk()
++)
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers.h
+@@ -0,0 +1,15 @@
++#ifdef CONFIG_X86_64
++#include "x86-64-syscalls-3.10.0-rc7_pointers.h"
++#endif
++
++#ifdef CONFIG_X86_32
++#include "x86-32-syscalls-3.1.0-rc6_pointers.h"
++#endif
++
++#ifdef CONFIG_ARM
++#include "arm-32-syscalls-3.4.25_pointers.h"
++#endif
++
++#ifdef CONFIG_PPC
++#include "powerpc-32-syscalls-3.0.34_pointers.h"
++#endif
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h
+@@ -0,0 +1,53 @@
++#define OVERRIDE_32_sys_execve
++#define OVERRIDE_64_sys_execve
++
++#ifndef CREATE_SYSCALL_TABLE
++
++SC_TRACE_EVENT(sys_execve,
++ TP_PROTO(const char *filename, char *const *argv, char *const *envp),
++ TP_ARGS(filename, argv, envp),
++ TP_STRUCT__entry(__string_from_user(filename, filename)
++ __field_hex(char *const *, argv)
++ __field_hex(char *const *, envp)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)
++ tp_assign(argv, argv)
++ tp_assign(envp, envp)),
++ TP_printk()
++)
++
++SC_TRACE_EVENT(sys_clone,
++ TP_PROTO(unsigned long clone_flags, unsigned long newsp,
++ void __user *parent_tid,
++ void __user *child_tid),
++ TP_ARGS(clone_flags, newsp, parent_tid, child_tid),
++ TP_STRUCT__entry(
++ __field_hex(unsigned long, clone_flags)
++ __field_hex(unsigned long, newsp)
++ __field_hex(void *, parent_tid)
++ __field_hex(void *, child_tid)),
++ TP_fast_assign(
++ tp_assign(clone_flags, clone_flags)
++ tp_assign(newsp, newsp)
++ tp_assign(parent_tid, parent_tid)
++ tp_assign(child_tid, child_tid)),
++ TP_printk()
++)
++
++/* present in 32, missing in 64 due to old kernel headers */
++#define OVERRIDE_32_sys_getcpu
++#define OVERRIDE_64_sys_getcpu
++SC_TRACE_EVENT(sys_getcpu,
++ TP_PROTO(unsigned __user *cpup, unsigned __user *nodep, void *tcache),
++ TP_ARGS(cpup, nodep, tcache),
++ TP_STRUCT__entry(
++ __field_hex(unsigned *, cpup)
++ __field_hex(unsigned *, nodep)
++ __field_hex(void *, tcache)),
++ TP_fast_assign(
++ tp_assign(cpup, cpup)
++ tp_assign(nodep, nodep)
++ tp_assign(tcache, tcache)),
++ TP_printk()
++)
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_unknown.h
+@@ -0,0 +1,55 @@
++#if !defined(_TRACE_SYSCALLS_UNKNOWN_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_UNKNOWN_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++
++#define UNKNOWN_SYSCALL_NRARGS 6
++
++TRACE_EVENT(sys_unknown,
++ TP_PROTO(unsigned int id, unsigned long *args),
++ TP_ARGS(id, args),
++ TP_STRUCT__entry(
++ __field(unsigned int, id)
++ __array(unsigned long, args, UNKNOWN_SYSCALL_NRARGS)
++ ),
++ TP_fast_assign(
++ tp_assign(id, id)
++ tp_memcpy(args, args, UNKNOWN_SYSCALL_NRARGS * sizeof(*args))
++ ),
++ TP_printk()
++)
++TRACE_EVENT(compat_sys_unknown,
++ TP_PROTO(unsigned int id, unsigned long *args),
++ TP_ARGS(id, args),
++ TP_STRUCT__entry(
++ __field(unsigned int, id)
++ __array(unsigned long, args, UNKNOWN_SYSCALL_NRARGS)
++ ),
++ TP_fast_assign(
++ tp_assign(id, id)
++ tp_memcpy(args, args, UNKNOWN_SYSCALL_NRARGS * sizeof(*args))
++ ),
++ TP_printk()
++)
++/*
++ * This is going to hook on sys_exit in the kernel.
++ * We change the name so we don't clash with the sys_exit syscall entry
++ * event.
++ */
++TRACE_EVENT(exit_syscall,
++ TP_PROTO(struct pt_regs *regs, long ret),
++ TP_ARGS(regs, ret),
++ TP_STRUCT__entry(
++ __field(long, ret)
++ ),
++ TP_fast_assign(
++ tp_assign(ret, ret)
++ ),
++ TP_printk()
++)
++
++#endif /* _TRACE_SYSCALLS_UNKNOWN_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers.h
+@@ -0,0 +1,1163 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_INTEGERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_INTEGERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "x86-32-syscalls-3.1.0-rc6_integers_override.h"
++#include "syscalls_integers_override.h"
++
++SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,
++ TP_STRUCT__entry(),
++ TP_fast_assign(),
++ TP_printk()
++)
++#ifndef OVERRIDE_32_sys_restart_syscall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_restart_syscall)
++#endif
++#ifndef OVERRIDE_32_sys_getpid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpid)
++#endif
++#ifndef OVERRIDE_32_sys_getuid16
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid16)
++#endif
++#ifndef OVERRIDE_32_sys_pause
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_pause)
++#endif
++#ifndef OVERRIDE_32_sys_sync
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sync)
++#endif
++#ifndef OVERRIDE_32_sys_getgid16
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid16)
++#endif
++#ifndef OVERRIDE_32_sys_geteuid16
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid16)
++#endif
++#ifndef OVERRIDE_32_sys_getegid16
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid16)
++#endif
++#ifndef OVERRIDE_32_sys_getppid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getppid)
++#endif
++#ifndef OVERRIDE_32_sys_getpgrp
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpgrp)
++#endif
++#ifndef OVERRIDE_32_sys_setsid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_setsid)
++#endif
++#ifndef OVERRIDE_32_sys_sgetmask
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sgetmask)
++#endif
++#ifndef OVERRIDE_32_sys_vhangup
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_vhangup)
++#endif
++#ifndef OVERRIDE_32_sys_munlockall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_munlockall)
++#endif
++#ifndef OVERRIDE_32_sys_sched_yield
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sched_yield)
++#endif
++#ifndef OVERRIDE_32_sys_getuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid)
++#endif
++#ifndef OVERRIDE_32_sys_getgid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid)
++#endif
++#ifndef OVERRIDE_32_sys_geteuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid)
++#endif
++#ifndef OVERRIDE_32_sys_getegid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid)
++#endif
++#ifndef OVERRIDE_32_sys_gettid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_gettid)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_init
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_inotify_init)
++#endif
++#ifndef OVERRIDE_32_sys_exit
++SC_TRACE_EVENT(sys_exit,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_close
++SC_TRACE_EVENT(sys_close,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setuid16
++SC_TRACE_EVENT(sys_setuid16,
++ TP_PROTO(old_uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(old_uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_alarm
++SC_TRACE_EVENT(sys_alarm,
++ TP_PROTO(unsigned int seconds),
++ TP_ARGS(seconds),
++ TP_STRUCT__entry(__field(unsigned int, seconds)),
++ TP_fast_assign(tp_assign(seconds, seconds)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_nice
++SC_TRACE_EVENT(sys_nice,
++ TP_PROTO(int increment),
++ TP_ARGS(increment),
++ TP_STRUCT__entry(__field(int, increment)),
++ TP_fast_assign(tp_assign(increment, increment)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup
++SC_TRACE_EVENT(sys_dup,
++ TP_PROTO(unsigned int fildes),
++ TP_ARGS(fildes),
++ TP_STRUCT__entry(__field(unsigned int, fildes)),
++ TP_fast_assign(tp_assign(fildes, fildes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_brk
++SC_TRACE_EVENT(sys_brk,
++ TP_PROTO(unsigned long brk),
++ TP_ARGS(brk),
++ TP_STRUCT__entry(__field(unsigned long, brk)),
++ TP_fast_assign(tp_assign(brk, brk)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgid16
++SC_TRACE_EVENT(sys_setgid16,
++ TP_PROTO(old_gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(old_gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_umask
++SC_TRACE_EVENT(sys_umask,
++ TP_PROTO(int mask),
++ TP_ARGS(mask),
++ TP_STRUCT__entry(__field(int, mask)),
++ TP_fast_assign(tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ssetmask
++SC_TRACE_EVENT(sys_ssetmask,
++ TP_PROTO(int newmask),
++ TP_ARGS(newmask),
++ TP_STRUCT__entry(__field(int, newmask)),
++ TP_fast_assign(tp_assign(newmask, newmask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fsync
++SC_TRACE_EVENT(sys_fsync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpgid
++SC_TRACE_EVENT(sys_getpgid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchdir
++SC_TRACE_EVENT(sys_fchdir,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_personality
++SC_TRACE_EVENT(sys_personality,
++ TP_PROTO(unsigned int personality),
++ TP_ARGS(personality),
++ TP_STRUCT__entry(__field(unsigned int, personality)),
++ TP_fast_assign(tp_assign(personality, personality)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsuid16
++SC_TRACE_EVENT(sys_setfsuid16,
++ TP_PROTO(old_uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(old_uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsgid16
++SC_TRACE_EVENT(sys_setfsgid16,
++ TP_PROTO(old_gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(old_gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getsid
++SC_TRACE_EVENT(sys_getsid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fdatasync
++SC_TRACE_EVENT(sys_fdatasync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mlockall
++SC_TRACE_EVENT(sys_mlockall,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getscheduler
++SC_TRACE_EVENT(sys_sched_getscheduler,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_get_priority_max
++SC_TRACE_EVENT(sys_sched_get_priority_max,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_get_priority_min
++SC_TRACE_EVENT(sys_sched_get_priority_min,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setuid
++SC_TRACE_EVENT(sys_setuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgid
++SC_TRACE_EVENT(sys_setgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsuid
++SC_TRACE_EVENT(sys_setfsuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setfsgid
++SC_TRACE_EVENT(sys_setfsgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_destroy
++SC_TRACE_EVENT(sys_io_destroy,
++ TP_PROTO(aio_context_t ctx),
++ TP_ARGS(ctx),
++ TP_STRUCT__entry(__field(aio_context_t, ctx)),
++ TP_fast_assign(tp_assign(ctx, ctx)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_exit_group
++SC_TRACE_EVENT(sys_exit_group,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_create
++SC_TRACE_EVENT(sys_epoll_create,
++ TP_PROTO(int size),
++ TP_ARGS(size),
++ TP_STRUCT__entry(__field(int, size)),
++ TP_fast_assign(tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_getoverrun
++SC_TRACE_EVENT(sys_timer_getoverrun,
++ TP_PROTO(timer_t timer_id),
++ TP_ARGS(timer_id),
++ TP_STRUCT__entry(__field(timer_t, timer_id)),
++ TP_fast_assign(tp_assign(timer_id, timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_delete
++SC_TRACE_EVENT(sys_timer_delete,
++ TP_PROTO(timer_t timer_id),
++ TP_ARGS(timer_id),
++ TP_STRUCT__entry(__field(timer_t, timer_id)),
++ TP_fast_assign(tp_assign(timer_id, timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_unshare
++SC_TRACE_EVENT(sys_unshare,
++ TP_PROTO(unsigned long unshare_flags),
++ TP_ARGS(unshare_flags),
++ TP_STRUCT__entry(__field(unsigned long, unshare_flags)),
++ TP_fast_assign(tp_assign(unshare_flags, unshare_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_eventfd
++SC_TRACE_EVENT(sys_eventfd,
++ TP_PROTO(unsigned int count),
++ TP_ARGS(count),
++ TP_STRUCT__entry(__field(unsigned int, count)),
++ TP_fast_assign(tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_create1
++SC_TRACE_EVENT(sys_epoll_create1,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_init1
++SC_TRACE_EVENT(sys_inotify_init1,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_syncfs
++SC_TRACE_EVENT(sys_syncfs,
++ TP_PROTO(int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_kill
++SC_TRACE_EVENT(sys_kill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_signal
++SC_TRACE_EVENT(sys_signal,
++ TP_PROTO(int sig, __sighandler_t handler),
++ TP_ARGS(sig, handler),
++ TP_STRUCT__entry(__field(int, sig) __field(__sighandler_t, handler)),
++ TP_fast_assign(tp_assign(sig, sig) tp_assign(handler, handler)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setpgid
++SC_TRACE_EVENT(sys_setpgid,
++ TP_PROTO(pid_t pid, pid_t pgid),
++ TP_ARGS(pid, pgid),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, pgid)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(pgid, pgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup2
++SC_TRACE_EVENT(sys_dup2,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd),
++ TP_ARGS(oldfd, newfd),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setreuid16
++SC_TRACE_EVENT(sys_setreuid16,
++ TP_PROTO(old_uid_t ruid, old_uid_t euid),
++ TP_ARGS(ruid, euid),
++ TP_STRUCT__entry(__field(old_uid_t, ruid) __field(old_uid_t, euid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setregid16
++SC_TRACE_EVENT(sys_setregid16,
++ TP_PROTO(old_gid_t rgid, old_gid_t egid),
++ TP_ARGS(rgid, egid),
++ TP_STRUCT__entry(__field(old_gid_t, rgid) __field(old_gid_t, egid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_munmap
++SC_TRACE_EVENT(sys_munmap,
++ TP_PROTO(unsigned long addr, size_t len),
++ TP_ARGS(addr, len),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ftruncate
++SC_TRACE_EVENT(sys_ftruncate,
++ TP_PROTO(unsigned int fd, unsigned long length),
++ TP_ARGS(fd, length),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, length)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchmod
++SC_TRACE_EVENT(sys_fchmod,
++ TP_PROTO(unsigned int fd, mode_t mode),
++ TP_ARGS(fd, mode),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(mode_t, mode)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getpriority
++SC_TRACE_EVENT(sys_getpriority,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_bdflush
++SC_TRACE_EVENT(sys_bdflush,
++ TP_PROTO(int func, long data),
++ TP_ARGS(func, data),
++ TP_STRUCT__entry(__field(int, func) __field(long, data)),
++ TP_fast_assign(tp_assign(func, func) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_flock
++SC_TRACE_EVENT(sys_flock,
++ TP_PROTO(unsigned int fd, unsigned int cmd),
++ TP_ARGS(fd, cmd),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mlock
++SC_TRACE_EVENT(sys_mlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_munlock
++SC_TRACE_EVENT(sys_munlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setreuid
++SC_TRACE_EVENT(sys_setreuid,
++ TP_PROTO(uid_t ruid, uid_t euid),
++ TP_ARGS(ruid, euid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setregid
++SC_TRACE_EVENT(sys_setregid,
++ TP_PROTO(gid_t rgid, gid_t egid),
++ TP_ARGS(rgid, egid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_tkill
++SC_TRACE_EVENT(sys_tkill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ioprio_get
++SC_TRACE_EVENT(sys_ioprio_get,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_rm_watch
++SC_TRACE_EVENT(sys_inotify_rm_watch,
++ TP_PROTO(int fd, __s32 wd),
++ TP_ARGS(fd, wd),
++ TP_STRUCT__entry(__field(int, fd) __field(__s32, wd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(wd, wd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timerfd_create
++SC_TRACE_EVENT(sys_timerfd_create,
++ TP_PROTO(int clockid, int flags),
++ TP_ARGS(clockid, flags),
++ TP_STRUCT__entry(__field(int, clockid) __field(int, flags)),
++ TP_fast_assign(tp_assign(clockid, clockid) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_eventfd2
++SC_TRACE_EVENT(sys_eventfd2,
++ TP_PROTO(unsigned int count, int flags),
++ TP_ARGS(count, flags),
++ TP_STRUCT__entry(__field(unsigned int, count) __field(int, flags)),
++ TP_fast_assign(tp_assign(count, count) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fanotify_init
++SC_TRACE_EVENT(sys_fanotify_init,
++ TP_PROTO(unsigned int flags, unsigned int event_f_flags),
++ TP_ARGS(flags, event_f_flags),
++ TP_STRUCT__entry(__field(unsigned int, flags) __field(unsigned int, event_f_flags)),
++ TP_fast_assign(tp_assign(flags, flags) tp_assign(event_f_flags, event_f_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setns
++SC_TRACE_EVENT(sys_setns,
++ TP_PROTO(int fd, int nstype),
++ TP_ARGS(fd, nstype),
++ TP_STRUCT__entry(__field(int, fd) __field(int, nstype)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(nstype, nstype)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lseek
++SC_TRACE_EVENT(sys_lseek,
++ TP_PROTO(unsigned int fd, off_t offset, unsigned int origin),
++ TP_ARGS(fd, offset, origin),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(off_t, offset) __field(unsigned int, origin)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(origin, origin)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ioctl
++SC_TRACE_EVENT(sys_ioctl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fcntl
++SC_TRACE_EVENT(sys_fcntl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchown16
++SC_TRACE_EVENT(sys_fchown16,
++ TP_PROTO(unsigned int fd, old_uid_t user, old_gid_t group),
++ TP_ARGS(fd, user, group),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(old_uid_t, user) __field(old_gid_t, group)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setpriority
++SC_TRACE_EVENT(sys_setpriority,
++ TP_PROTO(int which, int who, int niceval),
++ TP_ARGS(which, who, niceval),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, niceval)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(niceval, niceval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mprotect
++SC_TRACE_EVENT(sys_mprotect,
++ TP_PROTO(unsigned long start, size_t len, unsigned long prot),
++ TP_ARGS(start, len, prot),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(unsigned long, prot)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(prot, prot)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysfs
++SC_TRACE_EVENT(sys_sysfs,
++ TP_PROTO(int option, unsigned long arg1, unsigned long arg2),
++ TP_ARGS(option, arg1, arg2),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg1) __field(unsigned long, arg2)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg1, arg1) tp_assign(arg2, arg2)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_msync
++SC_TRACE_EVENT(sys_msync,
++ TP_PROTO(unsigned long start, size_t len, int flags),
++ TP_ARGS(start, len, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(int, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setresuid16
++SC_TRACE_EVENT(sys_setresuid16,
++ TP_PROTO(old_uid_t ruid, old_uid_t euid, old_uid_t suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field(old_uid_t, ruid) __field(old_uid_t, euid) __field(old_uid_t, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setresgid16
++SC_TRACE_EVENT(sys_setresgid16,
++ TP_PROTO(old_gid_t rgid, old_gid_t egid, old_gid_t sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field(old_gid_t, rgid) __field(old_gid_t, egid) __field(old_gid_t, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchown
++SC_TRACE_EVENT(sys_fchown,
++ TP_PROTO(unsigned int fd, uid_t user, gid_t group),
++ TP_ARGS(fd, user, group),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setresuid
++SC_TRACE_EVENT(sys_setresuid,
++ TP_PROTO(uid_t ruid, uid_t euid, uid_t suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid) __field(uid_t, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setresgid
++SC_TRACE_EVENT(sys_setresgid,
++ TP_PROTO(gid_t rgid, gid_t egid, gid_t sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid) __field(gid_t, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_madvise
++SC_TRACE_EVENT(sys_madvise,
++ TP_PROTO(unsigned long start, size_t len_in, int behavior),
++ TP_ARGS(start, len_in, behavior),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len_in) __field(int, behavior)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len_in, len_in) tp_assign(behavior, behavior)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fcntl64
++SC_TRACE_EVENT(sys_fcntl64,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_tgkill
++SC_TRACE_EVENT(sys_tgkill,
++ TP_PROTO(pid_t tgid, pid_t pid, int sig),
++ TP_ARGS(tgid, pid, sig),
++ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ioprio_set
++SC_TRACE_EVENT(sys_ioprio_set,
++ TP_PROTO(int which, int who, int ioprio),
++ TP_ARGS(which, who, ioprio),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, ioprio)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(ioprio, ioprio)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_dup3
++SC_TRACE_EVENT(sys_dup3,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd, int flags),
++ TP_ARGS(oldfd, newfd, flags),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd) __field(int, flags)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ptrace
++SC_TRACE_EVENT(sys_ptrace,
++ TP_PROTO(long request, long pid, unsigned long addr, unsigned long data),
++ TP_ARGS(request, pid, addr, data),
++ TP_STRUCT__entry(__field(long, request) __field(long, pid) __field_hex(unsigned long, addr) __field(unsigned long, data)),
++ TP_fast_assign(tp_assign(request, request) tp_assign(pid, pid) tp_assign(addr, addr) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_tee
++SC_TRACE_EVENT(sys_tee,
++ TP_PROTO(int fdin, int fdout, size_t len, unsigned int flags),
++ TP_ARGS(fdin, fdout, len, flags),
++ TP_STRUCT__entry(__field(int, fdin) __field(int, fdout) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fdin, fdin) tp_assign(fdout, fdout) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mremap
++SC_TRACE_EVENT(sys_mremap,
++ TP_PROTO(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr),
++ TP_ARGS(addr, old_len, new_len, flags, new_addr),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, old_len) __field(unsigned long, new_len) __field(unsigned long, flags) __field_hex(unsigned long, new_addr)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(old_len, old_len) tp_assign(new_len, new_len) tp_assign(flags, flags) tp_assign(new_addr, new_addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_prctl
++SC_TRACE_EVENT(sys_prctl,
++ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
++ TP_ARGS(option, arg2, arg3, arg4, arg5),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_remap_file_pages
++SC_TRACE_EVENT(sys_remap_file_pages,
++ TP_PROTO(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags),
++ TP_ARGS(start, size, prot, pgoff, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(unsigned long, size) __field(unsigned long, prot) __field(unsigned long, pgoff) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(size, size) tp_assign(prot, prot) tp_assign(pgoff, pgoff) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_keyctl
++SC_TRACE_EVENT(sys_keyctl,
++ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
++ TP_ARGS(option, arg2, arg3, arg4, arg5),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mmap_pgoff
++SC_TRACE_EVENT(sys_mmap_pgoff,
++ TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff),
++ TP_ARGS(addr, len, prot, flags, fd, pgoff),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, len) __field(unsigned long, prot) __field(unsigned long, flags) __field(unsigned long, fd) __field(unsigned long, pgoff)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(pgoff, pgoff)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_INTEGERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "x86-32-syscalls-3.1.0-rc6_integers_override.h"
++#include "syscalls_integers_override.h"
++
++#ifndef OVERRIDE_TABLE_32_sys_restart_syscall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_restart_syscall, 0, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpid, 20, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getuid16
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid16, 24, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pause
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_pause, 29, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sync
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sync, 36, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgid16
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid16, 47, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_geteuid16
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid16, 49, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getegid16
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid16, 50, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getppid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getppid, 64, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpgrp
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpgrp, 65, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setsid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_setsid, 66, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sgetmask
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sgetmask, 68, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_vhangup
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_vhangup, 111, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munlockall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_munlockall, 153, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_yield
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sched_yield, 158, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid, 199, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid, 200, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_geteuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid, 201, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getegid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid, 202, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_gettid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_gettid, 224, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_init
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_inotify_init, 291, 0)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_exit
++TRACE_SYSCALL_TABLE(sys_exit, sys_exit, 1, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_close
++TRACE_SYSCALL_TABLE(sys_close, sys_close, 6, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lseek
++TRACE_SYSCALL_TABLE(sys_lseek, sys_lseek, 19, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setuid16
++TRACE_SYSCALL_TABLE(sys_setuid16, sys_setuid16, 23, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ptrace
++TRACE_SYSCALL_TABLE(sys_ptrace, sys_ptrace, 26, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_alarm
++TRACE_SYSCALL_TABLE(sys_alarm, sys_alarm, 27, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_nice
++TRACE_SYSCALL_TABLE(sys_nice, sys_nice, 34, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_kill
++TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 37, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup
++TRACE_SYSCALL_TABLE(sys_dup, sys_dup, 41, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_brk
++TRACE_SYSCALL_TABLE(sys_brk, sys_brk, 45, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgid16
++TRACE_SYSCALL_TABLE(sys_setgid16, sys_setgid16, 46, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_signal
++TRACE_SYSCALL_TABLE(sys_signal, sys_signal, 48, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ioctl
++TRACE_SYSCALL_TABLE(sys_ioctl, sys_ioctl, 54, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fcntl
++TRACE_SYSCALL_TABLE(sys_fcntl, sys_fcntl, 55, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setpgid
++TRACE_SYSCALL_TABLE(sys_setpgid, sys_setpgid, 57, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_umask
++TRACE_SYSCALL_TABLE(sys_umask, sys_umask, 60, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup2
++TRACE_SYSCALL_TABLE(sys_dup2, sys_dup2, 63, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ssetmask
++TRACE_SYSCALL_TABLE(sys_ssetmask, sys_ssetmask, 69, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setreuid16
++TRACE_SYSCALL_TABLE(sys_setreuid16, sys_setreuid16, 70, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setregid16
++TRACE_SYSCALL_TABLE(sys_setregid16, sys_setregid16, 71, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munmap
++TRACE_SYSCALL_TABLE(sys_munmap, sys_munmap, 91, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ftruncate
++TRACE_SYSCALL_TABLE(sys_ftruncate, sys_ftruncate, 93, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchmod
++TRACE_SYSCALL_TABLE(sys_fchmod, sys_fchmod, 94, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchown16
++TRACE_SYSCALL_TABLE(sys_fchown16, sys_fchown16, 95, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpriority
++TRACE_SYSCALL_TABLE(sys_getpriority, sys_getpriority, 96, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setpriority
++TRACE_SYSCALL_TABLE(sys_setpriority, sys_setpriority, 97, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fsync
++TRACE_SYSCALL_TABLE(sys_fsync, sys_fsync, 118, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mprotect
++TRACE_SYSCALL_TABLE(sys_mprotect, sys_mprotect, 125, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getpgid
++TRACE_SYSCALL_TABLE(sys_getpgid, sys_getpgid, 132, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchdir
++TRACE_SYSCALL_TABLE(sys_fchdir, sys_fchdir, 133, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_bdflush
++TRACE_SYSCALL_TABLE(sys_bdflush, sys_bdflush, 134, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysfs
++TRACE_SYSCALL_TABLE(sys_sysfs, sys_sysfs, 135, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_personality
++TRACE_SYSCALL_TABLE(sys_personality, sys_personality, 136, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsuid16
++TRACE_SYSCALL_TABLE(sys_setfsuid16, sys_setfsuid16, 138, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsgid16
++TRACE_SYSCALL_TABLE(sys_setfsgid16, sys_setfsgid16, 139, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_flock
++TRACE_SYSCALL_TABLE(sys_flock, sys_flock, 143, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_msync
++TRACE_SYSCALL_TABLE(sys_msync, sys_msync, 144, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getsid
++TRACE_SYSCALL_TABLE(sys_getsid, sys_getsid, 147, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fdatasync
++TRACE_SYSCALL_TABLE(sys_fdatasync, sys_fdatasync, 148, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mlock
++TRACE_SYSCALL_TABLE(sys_mlock, sys_mlock, 150, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_munlock
++TRACE_SYSCALL_TABLE(sys_munlock, sys_munlock, 151, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mlockall
++TRACE_SYSCALL_TABLE(sys_mlockall, sys_mlockall, 152, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getscheduler
++TRACE_SYSCALL_TABLE(sys_sched_getscheduler, sys_sched_getscheduler, 157, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_max
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_max, sys_sched_get_priority_max, 159, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_get_priority_min
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_min, sys_sched_get_priority_min, 160, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mremap
++TRACE_SYSCALL_TABLE(sys_mremap, sys_mremap, 163, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setresuid16
++TRACE_SYSCALL_TABLE(sys_setresuid16, sys_setresuid16, 164, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setresgid16
++TRACE_SYSCALL_TABLE(sys_setresgid16, sys_setresgid16, 170, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_prctl
++TRACE_SYSCALL_TABLE(sys_prctl, sys_prctl, 172, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mmap_pgoff
++TRACE_SYSCALL_TABLE(sys_mmap_pgoff, sys_mmap_pgoff, 192, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setreuid
++TRACE_SYSCALL_TABLE(sys_setreuid, sys_setreuid, 203, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setregid
++TRACE_SYSCALL_TABLE(sys_setregid, sys_setregid, 204, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchown
++TRACE_SYSCALL_TABLE(sys_fchown, sys_fchown, 207, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setresuid
++TRACE_SYSCALL_TABLE(sys_setresuid, sys_setresuid, 208, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setresgid
++TRACE_SYSCALL_TABLE(sys_setresgid, sys_setresgid, 210, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setuid
++TRACE_SYSCALL_TABLE(sys_setuid, sys_setuid, 213, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgid
++TRACE_SYSCALL_TABLE(sys_setgid, sys_setgid, 214, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsuid
++TRACE_SYSCALL_TABLE(sys_setfsuid, sys_setfsuid, 215, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setfsgid
++TRACE_SYSCALL_TABLE(sys_setfsgid, sys_setfsgid, 216, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_madvise
++TRACE_SYSCALL_TABLE(sys_madvise, sys_madvise, 219, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fcntl64
++TRACE_SYSCALL_TABLE(sys_fcntl64, sys_fcntl64, 221, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_tkill
++TRACE_SYSCALL_TABLE(sys_tkill, sys_tkill, 238, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_destroy
++TRACE_SYSCALL_TABLE(sys_io_destroy, sys_io_destroy, 246, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_exit_group
++TRACE_SYSCALL_TABLE(sys_exit_group, sys_exit_group, 252, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_create
++TRACE_SYSCALL_TABLE(sys_epoll_create, sys_epoll_create, 254, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_remap_file_pages
++TRACE_SYSCALL_TABLE(sys_remap_file_pages, sys_remap_file_pages, 257, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_getoverrun
++TRACE_SYSCALL_TABLE(sys_timer_getoverrun, sys_timer_getoverrun, 262, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_delete
++TRACE_SYSCALL_TABLE(sys_timer_delete, sys_timer_delete, 263, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_tgkill
++TRACE_SYSCALL_TABLE(sys_tgkill, sys_tgkill, 270, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_keyctl
++TRACE_SYSCALL_TABLE(sys_keyctl, sys_keyctl, 288, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ioprio_set
++TRACE_SYSCALL_TABLE(sys_ioprio_set, sys_ioprio_set, 289, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ioprio_get
++TRACE_SYSCALL_TABLE(sys_ioprio_get, sys_ioprio_get, 290, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_rm_watch
++TRACE_SYSCALL_TABLE(sys_inotify_rm_watch, sys_inotify_rm_watch, 293, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_unshare
++TRACE_SYSCALL_TABLE(sys_unshare, sys_unshare, 310, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_tee
++TRACE_SYSCALL_TABLE(sys_tee, sys_tee, 315, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timerfd_create
++TRACE_SYSCALL_TABLE(sys_timerfd_create, sys_timerfd_create, 322, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_eventfd
++TRACE_SYSCALL_TABLE(sys_eventfd, sys_eventfd, 323, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_eventfd2
++TRACE_SYSCALL_TABLE(sys_eventfd2, sys_eventfd2, 328, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_create1
++TRACE_SYSCALL_TABLE(sys_epoll_create1, sys_epoll_create1, 329, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_dup3
++TRACE_SYSCALL_TABLE(sys_dup3, sys_dup3, 330, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_init1
++TRACE_SYSCALL_TABLE(sys_inotify_init1, sys_inotify_init1, 332, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fanotify_init
++TRACE_SYSCALL_TABLE(sys_fanotify_init, sys_fanotify_init, 338, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_syncfs
++TRACE_SYSCALL_TABLE(sys_syncfs, sys_syncfs, 344, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setns
++TRACE_SYSCALL_TABLE(sys_setns, sys_setns, 346, 2)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_integers_override.h
+@@ -0,0 +1,38 @@
++#ifndef CONFIG_UID16
++
++#define OVERRIDE_32_sys_getuid16
++#define OVERRIDE_32_sys_getgid16
++#define OVERRIDE_32_sys_geteuid16
++#define OVERRIDE_32_sys_getegid16
++#define OVERRIDE_32_sys_setuid16
++#define OVERRIDE_32_sys_setgid16
++#define OVERRIDE_32_sys_setfsuid16
++#define OVERRIDE_32_sys_setfsgid16
++#define OVERRIDE_32_sys_setreuid16
++#define OVERRIDE_32_sys_setregid16
++#define OVERRIDE_32_sys_fchown16
++#define OVERRIDE_32_sys_setresuid16
++#define OVERRIDE_32_sys_setresgid16
++
++#define OVERRIDE_TABLE_32_sys_getuid16
++#define OVERRIDE_TABLE_32_sys_getgid16
++#define OVERRIDE_TABLE_32_sys_geteuid16
++#define OVERRIDE_TABLE_32_sys_getegid16
++#define OVERRIDE_TABLE_32_sys_setuid16
++#define OVERRIDE_TABLE_32_sys_setgid16
++#define OVERRIDE_TABLE_32_sys_setreuid16
++#define OVERRIDE_TABLE_32_sys_setregid16
++#define OVERRIDE_TABLE_32_sys_fchown16
++#define OVERRIDE_TABLE_32_sys_setfsuid16
++#define OVERRIDE_TABLE_32_sys_setfsgid16
++#define OVERRIDE_TABLE_32_sys_setresuid16
++#define OVERRIDE_TABLE_32_sys_setresgid16
++
++#endif
++
++#ifdef CREATE_SYSCALL_TABLE
++
++#define OVERRIDE_TABLE_32_sys_mmap
++TRACE_SYSCALL_TABLE(sys_mmap, sys_mmap, 90, 6)
++
++#endif
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers.h
+@@ -0,0 +1,2232 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_POINTERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_POINTERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "x86-32-syscalls-3.1.0-rc6_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_32_sys_unlink
++SC_TRACE_EVENT(sys_unlink,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chdir
++SC_TRACE_EVENT(sys_chdir,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_time
++SC_TRACE_EVENT(sys_time,
++ TP_PROTO(time_t * tloc),
++ TP_ARGS(tloc),
++ TP_STRUCT__entry(__field_hex(time_t *, tloc)),
++ TP_fast_assign(tp_assign(tloc, tloc)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_oldumount
++SC_TRACE_EVENT(sys_oldumount,
++ TP_PROTO(char * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_stime
++SC_TRACE_EVENT(sys_stime,
++ TP_PROTO(time_t * tptr),
++ TP_ARGS(tptr),
++ TP_STRUCT__entry(__field_hex(time_t *, tptr)),
++ TP_fast_assign(tp_assign(tptr, tptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rmdir
++SC_TRACE_EVENT(sys_rmdir,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pipe
++SC_TRACE_EVENT(sys_pipe,
++ TP_PROTO(int * fildes),
++ TP_ARGS(fildes),
++ TP_STRUCT__entry(__field_hex(int *, fildes)),
++ TP_fast_assign(tp_assign(fildes, fildes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_times
++SC_TRACE_EVENT(sys_times,
++ TP_PROTO(struct tms * tbuf),
++ TP_ARGS(tbuf),
++ TP_STRUCT__entry(__field_hex(struct tms *, tbuf)),
++ TP_fast_assign(tp_assign(tbuf, tbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_acct
++SC_TRACE_EVENT(sys_acct,
++ TP_PROTO(const char * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_olduname
++SC_TRACE_EVENT(sys_olduname,
++ TP_PROTO(struct oldold_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct oldold_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chroot
++SC_TRACE_EVENT(sys_chroot,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sigpending
++SC_TRACE_EVENT(sys_sigpending,
++ TP_PROTO(old_sigset_t * set),
++ TP_ARGS(set),
++ TP_STRUCT__entry(__field_hex(old_sigset_t *, set)),
++ TP_fast_assign(tp_assign(set, set)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_old_select
++SC_TRACE_EVENT(sys_old_select,
++ TP_PROTO(struct sel_arg_struct * arg),
++ TP_ARGS(arg),
++ TP_STRUCT__entry(__field_hex(struct sel_arg_struct *, arg)),
++ TP_fast_assign(tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_uselib
++SC_TRACE_EVENT(sys_uselib,
++ TP_PROTO(const char * library),
++ TP_ARGS(library),
++ TP_STRUCT__entry(__field_hex(const char *, library)),
++ TP_fast_assign(tp_assign(library, library)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_old_mmap
++SC_TRACE_EVENT(sys_old_mmap,
++ TP_PROTO(struct mmap_arg_struct * arg),
++ TP_ARGS(arg),
++ TP_STRUCT__entry(__field_hex(struct mmap_arg_struct *, arg)),
++ TP_fast_assign(tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_uname
++SC_TRACE_EVENT(sys_uname,
++ TP_PROTO(struct old_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct old_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_swapoff
++SC_TRACE_EVENT(sys_swapoff,
++ TP_PROTO(const char * specialfile),
++ TP_ARGS(specialfile),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysinfo
++SC_TRACE_EVENT(sys_sysinfo,
++ TP_PROTO(struct sysinfo * info),
++ TP_ARGS(info),
++ TP_STRUCT__entry(__field_hex(struct sysinfo *, info)),
++ TP_fast_assign(tp_assign(info, info)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newuname
++SC_TRACE_EVENT(sys_newuname,
++ TP_PROTO(struct new_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct new_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_adjtimex
++SC_TRACE_EVENT(sys_adjtimex,
++ TP_PROTO(struct timex * txc_p),
++ TP_ARGS(txc_p),
++ TP_STRUCT__entry(__field_hex(struct timex *, txc_p)),
++ TP_fast_assign(tp_assign(txc_p, txc_p)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sysctl
++SC_TRACE_EVENT(sys_sysctl,
++ TP_PROTO(struct __sysctl_args * args),
++ TP_ARGS(args),
++ TP_STRUCT__entry(__field_hex(struct __sysctl_args *, args)),
++ TP_fast_assign(tp_assign(args, args)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_set_tid_address
++SC_TRACE_EVENT(sys_set_tid_address,
++ TP_PROTO(int * tidptr),
++ TP_ARGS(tidptr),
++ TP_STRUCT__entry(__field_hex(int *, tidptr)),
++ TP_fast_assign(tp_assign(tidptr, tidptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_unlink
++SC_TRACE_EVENT(sys_mq_unlink,
++ TP_PROTO(const char * u_name),
++ TP_ARGS(u_name),
++ TP_STRUCT__entry(__string_from_user(u_name, u_name)),
++ TP_fast_assign(tp_copy_string_from_user(u_name, u_name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_creat
++SC_TRACE_EVENT(sys_creat,
++ TP_PROTO(const char * pathname, int mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_link
++SC_TRACE_EVENT(sys_link,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chmod
++SC_TRACE_EVENT(sys_chmod,
++ TP_PROTO(const char * filename, mode_t mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(mode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_stat
++SC_TRACE_EVENT(sys_stat,
++ TP_PROTO(const char * filename, struct __old_kernel_stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct __old_kernel_stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstat
++SC_TRACE_EVENT(sys_fstat,
++ TP_PROTO(unsigned int fd, struct __old_kernel_stat * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct __old_kernel_stat *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_utime
++SC_TRACE_EVENT(sys_utime,
++ TP_PROTO(char * filename, struct utimbuf * times),
++ TP_ARGS(filename, times),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct utimbuf *, times)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(times, times)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_access
++SC_TRACE_EVENT(sys_access,
++ TP_PROTO(const char * filename, int mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rename
++SC_TRACE_EVENT(sys_rename,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mkdir
++SC_TRACE_EVENT(sys_mkdir,
++ TP_PROTO(const char * pathname, int mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_umount
++SC_TRACE_EVENT(sys_umount,
++ TP_PROTO(char * name, int flags),
++ TP_ARGS(name, flags),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ustat
++SC_TRACE_EVENT(sys_ustat,
++ TP_PROTO(unsigned dev, struct ustat * ubuf),
++ TP_ARGS(dev, ubuf),
++ TP_STRUCT__entry(__field(unsigned, dev) __field_hex(struct ustat *, ubuf)),
++ TP_fast_assign(tp_assign(dev, dev) tp_assign(ubuf, ubuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sethostname
++SC_TRACE_EVENT(sys_sethostname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setrlimit
++SC_TRACE_EVENT(sys_setrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_old_getrlimit
++SC_TRACE_EVENT(sys_old_getrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getrusage
++SC_TRACE_EVENT(sys_getrusage,
++ TP_PROTO(int who, struct rusage * ru),
++ TP_ARGS(who, ru),
++ TP_STRUCT__entry(__field(int, who) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(who, who) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_gettimeofday
++SC_TRACE_EVENT(sys_gettimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_settimeofday
++SC_TRACE_EVENT(sys_settimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getgroups16
++SC_TRACE_EVENT(sys_getgroups16,
++ TP_PROTO(int gidsetsize, old_gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(old_gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgroups16
++SC_TRACE_EVENT(sys_setgroups16,
++ TP_PROTO(int gidsetsize, old_gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(old_gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_symlink
++SC_TRACE_EVENT(sys_symlink,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lstat
++SC_TRACE_EVENT(sys_lstat,
++ TP_PROTO(const char * filename, struct __old_kernel_stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct __old_kernel_stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_swapon
++SC_TRACE_EVENT(sys_swapon,
++ TP_PROTO(const char * specialfile, int swap_flags),
++ TP_ARGS(specialfile, swap_flags),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile) __field(int, swap_flags)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile) tp_assign(swap_flags, swap_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_truncate
++SC_TRACE_EVENT(sys_truncate,
++ TP_PROTO(const char * path, long length),
++ TP_ARGS(path, length),
++ TP_STRUCT__entry(__string_from_user(path, path) __field(long, length)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_statfs
++SC_TRACE_EVENT(sys_statfs,
++ TP_PROTO(const char * pathname, struct statfs * buf),
++ TP_ARGS(pathname, buf),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstatfs
++SC_TRACE_EVENT(sys_fstatfs,
++ TP_PROTO(unsigned int fd, struct statfs * buf),
++ TP_ARGS(fd, buf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_socketcall
++SC_TRACE_EVENT(sys_socketcall,
++ TP_PROTO(int call, unsigned long * args),
++ TP_ARGS(call, args),
++ TP_STRUCT__entry(__field(int, call) __field_hex(unsigned long *, args)),
++ TP_fast_assign(tp_assign(call, call) tp_assign(args, args)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getitimer
++SC_TRACE_EVENT(sys_getitimer,
++ TP_PROTO(int which, struct itimerval * value),
++ TP_ARGS(which, value),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newstat
++SC_TRACE_EVENT(sys_newstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newlstat
++SC_TRACE_EVENT(sys_newlstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_newfstat
++SC_TRACE_EVENT(sys_newfstat,
++ TP_PROTO(unsigned int fd, struct stat * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setdomainname
++SC_TRACE_EVENT(sys_setdomainname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_delete_module
++SC_TRACE_EVENT(sys_delete_module,
++ TP_PROTO(const char * name_user, unsigned int flags),
++ TP_ARGS(name_user, flags),
++ TP_STRUCT__entry(__string_from_user(name_user, name_user) __field(unsigned int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name_user, name_user) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setparam
++SC_TRACE_EVENT(sys_sched_setparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getparam
++SC_TRACE_EVENT(sys_sched_getparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_rr_get_interval
++SC_TRACE_EVENT(sys_sched_rr_get_interval,
++ TP_PROTO(pid_t pid, struct timespec * interval),
++ TP_ARGS(pid, interval),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct timespec *, interval)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_nanosleep
++SC_TRACE_EVENT(sys_nanosleep,
++ TP_PROTO(struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(rqtp, rmtp),
++ TP_STRUCT__entry(__field_hex(struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigpending
++SC_TRACE_EVENT(sys_rt_sigpending,
++ TP_PROTO(sigset_t * set, size_t sigsetsize),
++ TP_ARGS(set, sigsetsize),
++ TP_STRUCT__entry(__field_hex(sigset_t *, set) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(set, set) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigsuspend
++SC_TRACE_EVENT(sys_rt_sigsuspend,
++ TP_PROTO(sigset_t * unewset, size_t sigsetsize),
++ TP_ARGS(unewset, sigsetsize),
++ TP_STRUCT__entry(__field_hex(sigset_t *, unewset) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(unewset, unewset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getcwd
++SC_TRACE_EVENT(sys_getcwd,
++ TP_PROTO(char * buf, unsigned long size),
++ TP_ARGS(buf, size),
++ TP_STRUCT__entry(__field_hex(char *, buf) __field(unsigned long, size)),
++ TP_fast_assign(tp_assign(buf, buf) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getrlimit
++SC_TRACE_EVENT(sys_getrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_stat64
++SC_TRACE_EVENT(sys_stat64,
++ TP_PROTO(const char * filename, struct stat64 * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lstat64
++SC_TRACE_EVENT(sys_lstat64,
++ TP_PROTO(const char * filename, struct stat64 * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstat64
++SC_TRACE_EVENT(sys_fstat64,
++ TP_PROTO(unsigned long fd, struct stat64 * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(struct stat64 *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getgroups
++SC_TRACE_EVENT(sys_getgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setgroups
++SC_TRACE_EVENT(sys_setgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pivot_root
++SC_TRACE_EVENT(sys_pivot_root,
++ TP_PROTO(const char * new_root, const char * put_old),
++ TP_ARGS(new_root, put_old),
++ TP_STRUCT__entry(__string_from_user(new_root, new_root) __string_from_user(put_old, put_old)),
++ TP_fast_assign(tp_copy_string_from_user(new_root, new_root) tp_copy_string_from_user(put_old, put_old)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_removexattr
++SC_TRACE_EVENT(sys_removexattr,
++ TP_PROTO(const char * pathname, const char * name),
++ TP_ARGS(pathname, name),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lremovexattr
++SC_TRACE_EVENT(sys_lremovexattr,
++ TP_PROTO(const char * pathname, const char * name),
++ TP_ARGS(pathname, name),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fremovexattr
++SC_TRACE_EVENT(sys_fremovexattr,
++ TP_PROTO(int fd, const char * name),
++ TP_ARGS(fd, name),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_setup
++SC_TRACE_EVENT(sys_io_setup,
++ TP_PROTO(unsigned nr_events, aio_context_t * ctxp),
++ TP_ARGS(nr_events, ctxp),
++ TP_STRUCT__entry(__field(unsigned, nr_events) __field_hex(aio_context_t *, ctxp)),
++ TP_fast_assign(tp_assign(nr_events, nr_events) tp_assign(ctxp, ctxp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_gettime
++SC_TRACE_EVENT(sys_timer_gettime,
++ TP_PROTO(timer_t timer_id, struct itimerspec * setting),
++ TP_ARGS(timer_id, setting),
++ TP_STRUCT__entry(__field(timer_t, timer_id) __field_hex(struct itimerspec *, setting)),
++ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(setting, setting)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_settime
++SC_TRACE_EVENT(sys_clock_settime,
++ TP_PROTO(const clockid_t which_clock, const struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(const struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_gettime
++SC_TRACE_EVENT(sys_clock_gettime,
++ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_getres
++SC_TRACE_EVENT(sys_clock_getres,
++ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_utimes
++SC_TRACE_EVENT(sys_utimes,
++ TP_PROTO(char * filename, struct timeval * utimes),
++ TP_ARGS(filename, utimes),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_notify
++SC_TRACE_EVENT(sys_mq_notify,
++ TP_PROTO(mqd_t mqdes, const struct sigevent * u_notification),
++ TP_ARGS(mqdes, u_notification),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct sigevent *, u_notification)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_notification, u_notification)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_set_robust_list
++SC_TRACE_EVENT(sys_set_robust_list,
++ TP_PROTO(struct robust_list_head * head, size_t len),
++ TP_ARGS(head, len),
++ TP_STRUCT__entry(__field_hex(struct robust_list_head *, head) __field(size_t, len)),
++ TP_fast_assign(tp_assign(head, head) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timerfd_gettime
++SC_TRACE_EVENT(sys_timerfd_gettime,
++ TP_PROTO(int ufd, struct itimerspec * otmr),
++ TP_ARGS(ufd, otmr),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(struct itimerspec *, otmr)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(otmr, otmr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pipe2
++SC_TRACE_EVENT(sys_pipe2,
++ TP_PROTO(int * fildes, int flags),
++ TP_ARGS(fildes, flags),
++ TP_STRUCT__entry(__field_hex(int *, fildes) __field(int, flags)),
++ TP_fast_assign(tp_assign(fildes, fildes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_adjtime
++SC_TRACE_EVENT(sys_clock_adjtime,
++ TP_PROTO(const clockid_t which_clock, struct timex * utx),
++ TP_ARGS(which_clock, utx),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timex *, utx)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(utx, utx)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_read
++SC_TRACE_EVENT(sys_read,
++ TP_PROTO(unsigned int fd, char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_write
++SC_TRACE_EVENT(sys_write,
++ TP_PROTO(unsigned int fd, const char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_open
++SC_TRACE_EVENT(sys_open,
++ TP_PROTO(const char * filename, int flags, int mode),
++ TP_ARGS(filename, flags, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_waitpid
++SC_TRACE_EVENT(sys_waitpid,
++ TP_PROTO(pid_t pid, int * stat_addr, int options),
++ TP_ARGS(pid, stat_addr, options),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(int *, stat_addr) __field(int, options)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(stat_addr, stat_addr) tp_assign(options, options)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mknod
++SC_TRACE_EVENT(sys_mknod,
++ TP_PROTO(const char * filename, int mode, unsigned dev),
++ TP_ARGS(filename, mode, dev),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lchown16
++SC_TRACE_EVENT(sys_lchown16,
++ TP_PROTO(const char * filename, old_uid_t user, old_gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(old_uid_t, user) __field(old_gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readlink
++SC_TRACE_EVENT(sys_readlink,
++ TP_PROTO(const char * path, char * buf, int bufsiz),
++ TP_ARGS(path, buf, bufsiz),
++ TP_STRUCT__entry(__string_from_user(path, path) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_old_readdir
++SC_TRACE_EVENT(sys_old_readdir,
++ TP_PROTO(unsigned int fd, struct old_linux_dirent * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct old_linux_dirent *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_syslog
++SC_TRACE_EVENT(sys_syslog,
++ TP_PROTO(int type, char * buf, int len),
++ TP_ARGS(type, buf, len),
++ TP_STRUCT__entry(__field(int, type) __field_hex(char *, buf) __field(int, len)),
++ TP_fast_assign(tp_assign(type, type) tp_assign(buf, buf) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setitimer
++SC_TRACE_EVENT(sys_setitimer,
++ TP_PROTO(int which, struct itimerval * value, struct itimerval * ovalue),
++ TP_ARGS(which, value, ovalue),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value) __field_hex(struct itimerval *, ovalue)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value) tp_assign(ovalue, ovalue)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sigprocmask
++SC_TRACE_EVENT(sys_sigprocmask,
++ TP_PROTO(int how, old_sigset_t * nset, old_sigset_t * oset),
++ TP_ARGS(how, nset, oset),
++ TP_STRUCT__entry(__field(int, how) __field_hex(old_sigset_t *, nset) __field_hex(old_sigset_t *, oset)),
++ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_init_module
++SC_TRACE_EVENT(sys_init_module,
++ TP_PROTO(void * umod, unsigned long len, const char * uargs),
++ TP_ARGS(umod, len, uargs),
++ TP_STRUCT__entry(__field_hex(void *, umod) __field(unsigned long, len) __field_hex(const char *, uargs)),
++ TP_fast_assign(tp_assign(umod, umod) tp_assign(len, len) tp_assign(uargs, uargs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getdents
++SC_TRACE_EVENT(sys_getdents,
++ TP_PROTO(unsigned int fd, struct linux_dirent * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readv
++SC_TRACE_EVENT(sys_readv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_writev
++SC_TRACE_EVENT(sys_writev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setscheduler
++SC_TRACE_EVENT(sys_sched_setscheduler,
++ TP_PROTO(pid_t pid, int policy, struct sched_param * param),
++ TP_ARGS(pid, policy, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, policy) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(policy, policy) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getresuid16
++SC_TRACE_EVENT(sys_getresuid16,
++ TP_PROTO(old_uid_t * ruid, old_uid_t * euid, old_uid_t * suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field_hex(old_uid_t *, ruid) __field_hex(old_uid_t *, euid) __field_hex(old_uid_t *, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_poll
++SC_TRACE_EVENT(sys_poll,
++ TP_PROTO(struct pollfd * ufds, unsigned int nfds, long timeout_msecs),
++ TP_ARGS(ufds, nfds, timeout_msecs),
++ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field(long, timeout_msecs)),
++ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(timeout_msecs, timeout_msecs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getresgid16
++SC_TRACE_EVENT(sys_getresgid16,
++ TP_PROTO(old_gid_t * rgid, old_gid_t * egid, old_gid_t * sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field_hex(old_gid_t *, rgid) __field_hex(old_gid_t *, egid) __field_hex(old_gid_t *, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigqueueinfo
++SC_TRACE_EVENT(sys_rt_sigqueueinfo,
++ TP_PROTO(pid_t pid, int sig, siginfo_t * uinfo),
++ TP_ARGS(pid, sig, uinfo),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chown16
++SC_TRACE_EVENT(sys_chown16,
++ TP_PROTO(const char * filename, old_uid_t user, old_gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(old_uid_t, user) __field(old_gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lchown
++SC_TRACE_EVENT(sys_lchown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getresuid
++SC_TRACE_EVENT(sys_getresuid,
++ TP_PROTO(uid_t * ruid, uid_t * euid, uid_t * suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field_hex(uid_t *, ruid) __field_hex(uid_t *, euid) __field_hex(uid_t *, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getresgid
++SC_TRACE_EVENT(sys_getresgid,
++ TP_PROTO(gid_t * rgid, gid_t * egid, gid_t * sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field_hex(gid_t *, rgid) __field_hex(gid_t *, egid) __field_hex(gid_t *, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_chown
++SC_TRACE_EVENT(sys_chown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mincore
++SC_TRACE_EVENT(sys_mincore,
++ TP_PROTO(unsigned long start, size_t len, unsigned char * vec),
++ TP_ARGS(start, len, vec),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field_hex(unsigned char *, vec)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(vec, vec)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getdents64
++SC_TRACE_EVENT(sys_getdents64,
++ TP_PROTO(unsigned int fd, struct linux_dirent64 * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent64 *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_listxattr
++SC_TRACE_EVENT(sys_listxattr,
++ TP_PROTO(const char * pathname, char * list, size_t size),
++ TP_ARGS(pathname, list, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_llistxattr
++SC_TRACE_EVENT(sys_llistxattr,
++ TP_PROTO(const char * pathname, char * list, size_t size),
++ TP_ARGS(pathname, list, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_flistxattr
++SC_TRACE_EVENT(sys_flistxattr,
++ TP_PROTO(int fd, char * list, size_t size),
++ TP_ARGS(fd, list, size),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_setaffinity
++SC_TRACE_EVENT(sys_sched_setaffinity,
++ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
++ TP_ARGS(pid, len, user_mask_ptr),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sched_getaffinity
++SC_TRACE_EVENT(sys_sched_getaffinity,
++ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
++ TP_ARGS(pid, len, user_mask_ptr),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_submit
++SC_TRACE_EVENT(sys_io_submit,
++ TP_PROTO(aio_context_t ctx_id, long nr, struct iocb * * iocbpp),
++ TP_ARGS(ctx_id, nr, iocbpp),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, nr) __field_hex(struct iocb * *, iocbpp)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(nr, nr) tp_assign(iocbpp, iocbpp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_cancel
++SC_TRACE_EVENT(sys_io_cancel,
++ TP_PROTO(aio_context_t ctx_id, struct iocb * iocb, struct io_event * result),
++ TP_ARGS(ctx_id, iocb, result),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field_hex(struct iocb *, iocb) __field_hex(struct io_event *, result)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(iocb, iocb) tp_assign(result, result)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_create
++SC_TRACE_EVENT(sys_timer_create,
++ TP_PROTO(const clockid_t which_clock, struct sigevent * timer_event_spec, timer_t * created_timer_id),
++ TP_ARGS(which_clock, timer_event_spec, created_timer_id),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct sigevent *, timer_event_spec) __field_hex(timer_t *, created_timer_id)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(timer_event_spec, timer_event_spec) tp_assign(created_timer_id, created_timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_statfs64
++SC_TRACE_EVENT(sys_statfs64,
++ TP_PROTO(const char * pathname, size_t sz, struct statfs64 * buf),
++ TP_ARGS(pathname, sz, buf),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(size_t, sz) __field_hex(struct statfs64 *, buf)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(sz, sz) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstatfs64
++SC_TRACE_EVENT(sys_fstatfs64,
++ TP_PROTO(unsigned int fd, size_t sz, struct statfs64 * buf),
++ TP_ARGS(fd, sz, buf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(size_t, sz) __field_hex(struct statfs64 *, buf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(sz, sz) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_getsetattr
++SC_TRACE_EVENT(sys_mq_getsetattr,
++ TP_PROTO(mqd_t mqdes, const struct mq_attr * u_mqstat, struct mq_attr * u_omqstat),
++ TP_ARGS(mqdes, u_mqstat, u_omqstat),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct mq_attr *, u_mqstat) __field_hex(struct mq_attr *, u_omqstat)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_mqstat, u_mqstat) tp_assign(u_omqstat, u_omqstat)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_inotify_add_watch
++SC_TRACE_EVENT(sys_inotify_add_watch,
++ TP_PROTO(int fd, const char * pathname, u32 mask),
++ TP_ARGS(fd, pathname, mask),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(pathname, pathname) __field(u32, mask)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(pathname, pathname) tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mkdirat
++SC_TRACE_EVENT(sys_mkdirat,
++ TP_PROTO(int dfd, const char * pathname, int mode),
++ TP_ARGS(dfd, pathname, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_futimesat
++SC_TRACE_EVENT(sys_futimesat,
++ TP_PROTO(int dfd, const char * filename, struct timeval * utimes),
++ TP_ARGS(dfd, filename, utimes),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_unlinkat
++SC_TRACE_EVENT(sys_unlinkat,
++ TP_PROTO(int dfd, const char * pathname, int flag),
++ TP_ARGS(dfd, pathname, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_symlinkat
++SC_TRACE_EVENT(sys_symlinkat,
++ TP_PROTO(const char * oldname, int newdfd, const char * newname),
++ TP_ARGS(oldname, newdfd, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchmodat
++SC_TRACE_EVENT(sys_fchmodat,
++ TP_PROTO(int dfd, const char * filename, mode_t mode),
++ TP_ARGS(dfd, filename, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(mode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_faccessat
++SC_TRACE_EVENT(sys_faccessat,
++ TP_PROTO(int dfd, const char * filename, int mode),
++ TP_ARGS(dfd, filename, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_get_robust_list
++SC_TRACE_EVENT(sys_get_robust_list,
++ TP_PROTO(int pid, struct robust_list_head * * head_ptr, size_t * len_ptr),
++ TP_ARGS(pid, head_ptr, len_ptr),
++ TP_STRUCT__entry(__field(int, pid) __field_hex(struct robust_list_head * *, head_ptr) __field_hex(size_t *, len_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(head_ptr, head_ptr) tp_assign(len_ptr, len_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getcpu
++SC_TRACE_EVENT(sys_getcpu,
++ TP_PROTO(unsigned * cpup, unsigned * nodep, struct getcpu_cache * unused),
++ TP_ARGS(cpup, nodep, unused),
++ TP_STRUCT__entry(__field_hex(unsigned *, cpup) __field_hex(unsigned *, nodep) __field_hex(struct getcpu_cache *, unused)),
++ TP_fast_assign(tp_assign(cpup, cpup) tp_assign(nodep, nodep) tp_assign(unused, unused)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_signalfd
++SC_TRACE_EVENT(sys_signalfd,
++ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask),
++ TP_ARGS(ufd, user_mask, sizemask),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_reboot
++SC_TRACE_EVENT(sys_reboot,
++ TP_PROTO(int magic1, int magic2, unsigned int cmd, void * arg),
++ TP_ARGS(magic1, magic2, cmd, arg),
++ TP_STRUCT__entry(__field(int, magic1) __field(int, magic2) __field(unsigned int, cmd) __field_hex(void *, arg)),
++ TP_fast_assign(tp_assign(magic1, magic1) tp_assign(magic2, magic2) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_wait4
++SC_TRACE_EVENT(sys_wait4,
++ TP_PROTO(pid_t upid, int * stat_addr, int options, struct rusage * ru),
++ TP_ARGS(upid, stat_addr, options, ru),
++ TP_STRUCT__entry(__field(pid_t, upid) __field_hex(int *, stat_addr) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(upid, upid) tp_assign(stat_addr, stat_addr) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_quotactl
++SC_TRACE_EVENT(sys_quotactl,
++ TP_PROTO(unsigned int cmd, const char * special, qid_t id, void * addr),
++ TP_ARGS(cmd, special, id, addr),
++ TP_STRUCT__entry(__field(unsigned int, cmd) __field_hex(const char *, special) __field(qid_t, id) __field_hex(void *, addr)),
++ TP_fast_assign(tp_assign(cmd, cmd) tp_assign(special, special) tp_assign(id, id) tp_assign(addr, addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigaction
++SC_TRACE_EVENT(sys_rt_sigaction,
++ TP_PROTO(int sig, const struct sigaction * act, struct sigaction * oact, size_t sigsetsize),
++ TP_ARGS(sig, act, oact, sigsetsize),
++ TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction *, act) __field_hex(struct sigaction *, oact) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigprocmask
++SC_TRACE_EVENT(sys_rt_sigprocmask,
++ TP_PROTO(int how, sigset_t * nset, sigset_t * oset, size_t sigsetsize),
++ TP_ARGS(how, nset, oset, sigsetsize),
++ TP_STRUCT__entry(__field(int, how) __field_hex(sigset_t *, nset) __field_hex(sigset_t *, oset) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_sigtimedwait
++SC_TRACE_EVENT(sys_rt_sigtimedwait,
++ TP_PROTO(const sigset_t * uthese, siginfo_t * uinfo, const struct timespec * uts, size_t sigsetsize),
++ TP_ARGS(uthese, uinfo, uts, sigsetsize),
++ TP_STRUCT__entry(__field_hex(const sigset_t *, uthese) __field_hex(siginfo_t *, uinfo) __field_hex(const struct timespec *, uts) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(uthese, uthese) tp_assign(uinfo, uinfo) tp_assign(uts, uts) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendfile
++SC_TRACE_EVENT(sys_sendfile,
++ TP_PROTO(int out_fd, int in_fd, off_t * offset, size_t count),
++ TP_ARGS(out_fd, in_fd, offset, count),
++ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(off_t *, offset) __field(size_t, count)),
++ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_getxattr
++SC_TRACE_EVENT(sys_getxattr,
++ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
++ TP_ARGS(pathname, name, value, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lgetxattr
++SC_TRACE_EVENT(sys_lgetxattr,
++ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
++ TP_ARGS(pathname, name, value, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fgetxattr
++SC_TRACE_EVENT(sys_fgetxattr,
++ TP_PROTO(int fd, const char * name, void * value, size_t size),
++ TP_ARGS(fd, name, value, size),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendfile64
++SC_TRACE_EVENT(sys_sendfile64,
++ TP_PROTO(int out_fd, int in_fd, loff_t * offset, size_t count),
++ TP_ARGS(out_fd, in_fd, offset, count),
++ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(loff_t *, offset) __field(size_t, count)),
++ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_ctl
++SC_TRACE_EVENT(sys_epoll_ctl,
++ TP_PROTO(int epfd, int op, int fd, struct epoll_event * event),
++ TP_ARGS(epfd, op, fd, event),
++ TP_STRUCT__entry(__field(int, epfd) __field(int, op) __field(int, fd) __field_hex(struct epoll_event *, event)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(op, op) tp_assign(fd, fd) tp_assign(event, event)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_wait
++SC_TRACE_EVENT(sys_epoll_wait,
++ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout),
++ TP_ARGS(epfd, events, maxevents, timeout),
++ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timer_settime
++SC_TRACE_EVENT(sys_timer_settime,
++ TP_PROTO(timer_t timer_id, int flags, const struct itimerspec * new_setting, struct itimerspec * old_setting),
++ TP_ARGS(timer_id, flags, new_setting, old_setting),
++ TP_STRUCT__entry(__field(timer_t, timer_id) __field(int, flags) __field_hex(const struct itimerspec *, new_setting) __field_hex(struct itimerspec *, old_setting)),
++ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(flags, flags) tp_assign(new_setting, new_setting) tp_assign(old_setting, old_setting)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_clock_nanosleep
++SC_TRACE_EVENT(sys_clock_nanosleep,
++ TP_PROTO(const clockid_t which_clock, int flags, const struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(which_clock, flags, rqtp, rmtp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field(int, flags) __field_hex(const struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(flags, flags) tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_open
++SC_TRACE_EVENT(sys_mq_open,
++ TP_PROTO(const char * u_name, int oflag, mode_t mode, struct mq_attr * u_attr),
++ TP_ARGS(u_name, oflag, mode, u_attr),
++ TP_STRUCT__entry(__string_from_user(u_name, u_name) __field(int, oflag) __field(mode_t, mode) __field_hex(struct mq_attr *, u_attr)),
++ TP_fast_assign(tp_copy_string_from_user(u_name, u_name) tp_assign(oflag, oflag) tp_assign(mode, mode) tp_assign(u_attr, u_attr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_kexec_load
++SC_TRACE_EVENT(sys_kexec_load,
++ TP_PROTO(unsigned long entry, unsigned long nr_segments, struct kexec_segment * segments, unsigned long flags),
++ TP_ARGS(entry, nr_segments, segments, flags),
++ TP_STRUCT__entry(__field(unsigned long, entry) __field(unsigned long, nr_segments) __field_hex(struct kexec_segment *, segments) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(entry, entry) tp_assign(nr_segments, nr_segments) tp_assign(segments, segments) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_request_key
++SC_TRACE_EVENT(sys_request_key,
++ TP_PROTO(const char * _type, const char * _description, const char * _callout_info, key_serial_t destringid),
++ TP_ARGS(_type, _description, _callout_info, destringid),
++ TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const char *, _callout_info) __field(key_serial_t, destringid)),
++ TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_callout_info, _callout_info) tp_assign(destringid, destringid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_openat
++SC_TRACE_EVENT(sys_openat,
++ TP_PROTO(int dfd, const char * filename, int flags, int mode),
++ TP_ARGS(dfd, filename, flags, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, flags) __field(int, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mknodat
++SC_TRACE_EVENT(sys_mknodat,
++ TP_PROTO(int dfd, const char * filename, int mode, unsigned dev),
++ TP_ARGS(dfd, filename, mode, dev),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fstatat64
++SC_TRACE_EVENT(sys_fstatat64,
++ TP_PROTO(int dfd, const char * filename, struct stat64 * statbuf, int flag),
++ TP_ARGS(dfd, filename, statbuf, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct stat64 *, statbuf) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_renameat
++SC_TRACE_EVENT(sys_renameat,
++ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname),
++ TP_ARGS(olddfd, oldname, newdfd, newname),
++ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_readlinkat
++SC_TRACE_EVENT(sys_readlinkat,
++ TP_PROTO(int dfd, const char * pathname, char * buf, int bufsiz),
++ TP_ARGS(dfd, pathname, buf, bufsiz),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_vmsplice
++SC_TRACE_EVENT(sys_vmsplice,
++ TP_PROTO(int fd, const struct iovec * iov, unsigned long nr_segs, unsigned int flags),
++ TP_ARGS(fd, iov, nr_segs, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(const struct iovec *, iov) __field(unsigned long, nr_segs) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(iov, iov) tp_assign(nr_segs, nr_segs) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_utimensat
++SC_TRACE_EVENT(sys_utimensat,
++ TP_PROTO(int dfd, const char * filename, struct timespec * utimes, int flags),
++ TP_ARGS(dfd, filename, utimes, flags),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timespec *, utimes) __field(int, flags)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_timerfd_settime
++SC_TRACE_EVENT(sys_timerfd_settime,
++ TP_PROTO(int ufd, int flags, const struct itimerspec * utmr, struct itimerspec * otmr),
++ TP_ARGS(ufd, flags, utmr, otmr),
++ TP_STRUCT__entry(__field(int, ufd) __field(int, flags) __field_hex(const struct itimerspec *, utmr) __field_hex(struct itimerspec *, otmr)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(flags, flags) tp_assign(utmr, utmr) tp_assign(otmr, otmr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_signalfd4
++SC_TRACE_EVENT(sys_signalfd4,
++ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask, int flags),
++ TP_ARGS(ufd, user_mask, sizemask, flags),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask) __field(int, flags)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_rt_tgsigqueueinfo
++SC_TRACE_EVENT(sys_rt_tgsigqueueinfo,
++ TP_PROTO(pid_t tgid, pid_t pid, int sig, siginfo_t * uinfo),
++ TP_ARGS(tgid, pid, sig, uinfo),
++ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_prlimit64
++SC_TRACE_EVENT(sys_prlimit64,
++ TP_PROTO(pid_t pid, unsigned int resource, const struct rlimit64 * new_rlim, struct rlimit64 * old_rlim),
++ TP_ARGS(pid, resource, new_rlim, old_rlim),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, resource) __field_hex(const struct rlimit64 *, new_rlim) __field_hex(struct rlimit64 *, old_rlim)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(resource, resource) tp_assign(new_rlim, new_rlim) tp_assign(old_rlim, old_rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_sendmmsg
++SC_TRACE_EVENT(sys_sendmmsg,
++ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags),
++ TP_ARGS(fd, mmsg, vlen, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mount
++SC_TRACE_EVENT(sys_mount,
++ TP_PROTO(char * dev_name, char * dir_name, char * type, unsigned long flags, void * data),
++ TP_ARGS(dev_name, dir_name, type, flags, data),
++ TP_STRUCT__entry(__string_from_user(dev_name, dev_name) __string_from_user(dir_name, dir_name) __string_from_user(type, type) __field(unsigned long, flags) __field_hex(void *, data)),
++ TP_fast_assign(tp_copy_string_from_user(dev_name, dev_name) tp_copy_string_from_user(dir_name, dir_name) tp_copy_string_from_user(type, type) tp_assign(flags, flags) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_llseek
++SC_TRACE_EVENT(sys_llseek,
++ TP_PROTO(unsigned int fd, unsigned long offset_high, unsigned long offset_low, loff_t * result, unsigned int origin),
++ TP_ARGS(fd, offset_high, offset_low, result, origin),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, offset_high) __field(unsigned long, offset_low) __field_hex(loff_t *, result) __field(unsigned int, origin)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset_high, offset_high) tp_assign(offset_low, offset_low) tp_assign(result, result) tp_assign(origin, origin)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_select
++SC_TRACE_EVENT(sys_select,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timeval * tvp),
++ TP_ARGS(n, inp, outp, exp, tvp),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timeval *, tvp)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tvp, tvp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_setxattr
++SC_TRACE_EVENT(sys_setxattr,
++ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(pathname, name, value, size, flags),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_lsetxattr
++SC_TRACE_EVENT(sys_lsetxattr,
++ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(pathname, name, value, size, flags),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fsetxattr
++SC_TRACE_EVENT(sys_fsetxattr,
++ TP_PROTO(int fd, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(fd, name, value, size, flags),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_io_getevents
++SC_TRACE_EVENT(sys_io_getevents,
++ TP_PROTO(aio_context_t ctx_id, long min_nr, long nr, struct io_event * events, struct timespec * timeout),
++ TP_ARGS(ctx_id, min_nr, nr, events, timeout),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, min_nr) __field(long, nr) __field_hex(struct io_event *, events) __field_hex(struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(min_nr, min_nr) tp_assign(nr, nr) tp_assign(events, events) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_timedsend
++SC_TRACE_EVENT(sys_mq_timedsend,
++ TP_PROTO(mqd_t mqdes, const char * u_msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec * u_abs_timeout),
++ TP_ARGS(mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const char *, u_msg_ptr) __field(size_t, msg_len) __field(unsigned int, msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(msg_prio, msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_mq_timedreceive
++SC_TRACE_EVENT(sys_mq_timedreceive,
++ TP_PROTO(mqd_t mqdes, char * u_msg_ptr, size_t msg_len, unsigned int * u_msg_prio, const struct timespec * u_abs_timeout),
++ TP_ARGS(mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(char *, u_msg_ptr) __field(size_t, msg_len) __field_hex(unsigned int *, u_msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(u_msg_prio, u_msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_waitid
++SC_TRACE_EVENT(sys_waitid,
++ TP_PROTO(int which, pid_t upid, struct siginfo * infop, int options, struct rusage * ru),
++ TP_ARGS(which, upid, infop, options, ru),
++ TP_STRUCT__entry(__field(int, which) __field(pid_t, upid) __field_hex(struct siginfo *, infop) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(upid, upid) tp_assign(infop, infop) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_add_key
++SC_TRACE_EVENT(sys_add_key,
++ TP_PROTO(const char * _type, const char * _description, const void * _payload, size_t plen, key_serial_t ringid),
++ TP_ARGS(_type, _description, _payload, plen, ringid),
++ TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const void *, _payload) __field(size_t, plen) __field(key_serial_t, ringid)),
++ TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_payload, _payload) tp_assign(plen, plen) tp_assign(ringid, ringid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_fchownat
++SC_TRACE_EVENT(sys_fchownat,
++ TP_PROTO(int dfd, const char * filename, uid_t user, gid_t group, int flag),
++ TP_ARGS(dfd, filename, user, group, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_linkat
++SC_TRACE_EVENT(sys_linkat,
++ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname, int flags),
++ TP_ARGS(olddfd, oldname, newdfd, newname, flags),
++ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname) __field(int, flags)),
++ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ppoll
++SC_TRACE_EVENT(sys_ppoll,
++ TP_PROTO(struct pollfd * ufds, unsigned int nfds, struct timespec * tsp, const sigset_t * sigmask, size_t sigsetsize),
++ TP_ARGS(ufds, nfds, tsp, sigmask, sigsetsize),
++ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field_hex(struct timespec *, tsp) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(tsp, tsp) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_preadv
++SC_TRACE_EVENT(sys_preadv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
++ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pwritev
++SC_TRACE_EVENT(sys_pwritev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
++ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_perf_event_open
++SC_TRACE_EVENT(sys_perf_event_open,
++ TP_PROTO(struct perf_event_attr * attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags),
++ TP_ARGS(attr_uptr, pid, cpu, group_fd, flags),
++ TP_STRUCT__entry(__field_hex(struct perf_event_attr *, attr_uptr) __field(pid_t, pid) __field(int, cpu) __field(int, group_fd) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(attr_uptr, attr_uptr) tp_assign(pid, pid) tp_assign(cpu, cpu) tp_assign(group_fd, group_fd) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_recvmmsg
++SC_TRACE_EVENT(sys_recvmmsg,
++ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags, struct timespec * timeout),
++ TP_ARGS(fd, mmsg, vlen, flags, timeout),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags) __field_hex(struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_ipc
++SC_TRACE_EVENT(sys_ipc,
++ TP_PROTO(unsigned int call, int first, unsigned long second, unsigned long third, void * ptr, long fifth),
++ TP_ARGS(call, first, second, third, ptr, fifth),
++ TP_STRUCT__entry(__field(unsigned int, call) __field(int, first) __field(unsigned long, second) __field(unsigned long, third) __field_hex(void *, ptr) __field(long, fifth)),
++ TP_fast_assign(tp_assign(call, call) tp_assign(first, first) tp_assign(second, second) tp_assign(third, third) tp_assign(ptr, ptr) tp_assign(fifth, fifth)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_futex
++SC_TRACE_EVENT(sys_futex,
++ TP_PROTO(u32 * uaddr, int op, u32 val, struct timespec * utime, u32 * uaddr2, u32 val3),
++ TP_ARGS(uaddr, op, val, utime, uaddr2, val3),
++ TP_STRUCT__entry(__field_hex(u32 *, uaddr) __field(int, op) __field(u32, val) __field_hex(struct timespec *, utime) __field_hex(u32 *, uaddr2) __field(u32, val3)),
++ TP_fast_assign(tp_assign(uaddr, uaddr) tp_assign(op, op) tp_assign(val, val) tp_assign(utime, utime) tp_assign(uaddr2, uaddr2) tp_assign(val3, val3)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_pselect6
++SC_TRACE_EVENT(sys_pselect6,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timespec * tsp, void * sig),
++ TP_ARGS(n, inp, outp, exp, tsp, sig),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timespec *, tsp) __field_hex(void *, sig)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tsp, tsp) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_splice
++SC_TRACE_EVENT(sys_splice,
++ TP_PROTO(int fd_in, loff_t * off_in, int fd_out, loff_t * off_out, size_t len, unsigned int flags),
++ TP_ARGS(fd_in, off_in, fd_out, off_out, len, flags),
++ TP_STRUCT__entry(__field(int, fd_in) __field_hex(loff_t *, off_in) __field(int, fd_out) __field_hex(loff_t *, off_out) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd_in, fd_in) tp_assign(off_in, off_in) tp_assign(fd_out, fd_out) tp_assign(off_out, off_out) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_32_sys_epoll_pwait
++SC_TRACE_EVENT(sys_epoll_pwait,
++ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout, const sigset_t * sigmask, size_t sigsetsize),
++ TP_ARGS(epfd, events, maxevents, timeout, sigmask, sigsetsize),
++ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_POINTERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "x86-32-syscalls-3.1.0-rc6_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_TABLE_32_sys_read
++TRACE_SYSCALL_TABLE(sys_read, sys_read, 3, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_write
++TRACE_SYSCALL_TABLE(sys_write, sys_write, 4, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_open
++TRACE_SYSCALL_TABLE(sys_open, sys_open, 5, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_waitpid
++TRACE_SYSCALL_TABLE(sys_waitpid, sys_waitpid, 7, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_creat
++TRACE_SYSCALL_TABLE(sys_creat, sys_creat, 8, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_link
++TRACE_SYSCALL_TABLE(sys_link, sys_link, 9, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_unlink
++TRACE_SYSCALL_TABLE(sys_unlink, sys_unlink, 10, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chdir
++TRACE_SYSCALL_TABLE(sys_chdir, sys_chdir, 12, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_time
++TRACE_SYSCALL_TABLE(sys_time, sys_time, 13, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mknod
++TRACE_SYSCALL_TABLE(sys_mknod, sys_mknod, 14, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chmod
++TRACE_SYSCALL_TABLE(sys_chmod, sys_chmod, 15, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lchown16
++TRACE_SYSCALL_TABLE(sys_lchown16, sys_lchown16, 16, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_stat
++TRACE_SYSCALL_TABLE(sys_stat, sys_stat, 18, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mount
++TRACE_SYSCALL_TABLE(sys_mount, sys_mount, 21, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_oldumount
++TRACE_SYSCALL_TABLE(sys_oldumount, sys_oldumount, 22, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_stime
++TRACE_SYSCALL_TABLE(sys_stime, sys_stime, 25, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstat
++TRACE_SYSCALL_TABLE(sys_fstat, sys_fstat, 28, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_utime
++TRACE_SYSCALL_TABLE(sys_utime, sys_utime, 30, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_access
++TRACE_SYSCALL_TABLE(sys_access, sys_access, 33, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rename
++TRACE_SYSCALL_TABLE(sys_rename, sys_rename, 38, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mkdir
++TRACE_SYSCALL_TABLE(sys_mkdir, sys_mkdir, 39, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rmdir
++TRACE_SYSCALL_TABLE(sys_rmdir, sys_rmdir, 40, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pipe
++TRACE_SYSCALL_TABLE(sys_pipe, sys_pipe, 42, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_times
++TRACE_SYSCALL_TABLE(sys_times, sys_times, 43, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_acct
++TRACE_SYSCALL_TABLE(sys_acct, sys_acct, 51, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_umount
++TRACE_SYSCALL_TABLE(sys_umount, sys_umount, 52, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_olduname
++TRACE_SYSCALL_TABLE(sys_olduname, sys_olduname, 59, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chroot
++TRACE_SYSCALL_TABLE(sys_chroot, sys_chroot, 61, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ustat
++TRACE_SYSCALL_TABLE(sys_ustat, sys_ustat, 62, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sigpending
++TRACE_SYSCALL_TABLE(sys_sigpending, sys_sigpending, 73, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sethostname
++TRACE_SYSCALL_TABLE(sys_sethostname, sys_sethostname, 74, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setrlimit
++TRACE_SYSCALL_TABLE(sys_setrlimit, sys_setrlimit, 75, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_old_getrlimit
++TRACE_SYSCALL_TABLE(sys_old_getrlimit, sys_old_getrlimit, 76, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getrusage
++TRACE_SYSCALL_TABLE(sys_getrusage, sys_getrusage, 77, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_gettimeofday
++TRACE_SYSCALL_TABLE(sys_gettimeofday, sys_gettimeofday, 78, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_settimeofday
++TRACE_SYSCALL_TABLE(sys_settimeofday, sys_settimeofday, 79, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgroups16
++TRACE_SYSCALL_TABLE(sys_getgroups16, sys_getgroups16, 80, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgroups16
++TRACE_SYSCALL_TABLE(sys_setgroups16, sys_setgroups16, 81, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_old_select
++TRACE_SYSCALL_TABLE(sys_old_select, sys_old_select, 82, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_symlink
++TRACE_SYSCALL_TABLE(sys_symlink, sys_symlink, 83, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lstat
++TRACE_SYSCALL_TABLE(sys_lstat, sys_lstat, 84, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readlink
++TRACE_SYSCALL_TABLE(sys_readlink, sys_readlink, 85, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_uselib
++TRACE_SYSCALL_TABLE(sys_uselib, sys_uselib, 86, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_swapon
++TRACE_SYSCALL_TABLE(sys_swapon, sys_swapon, 87, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_reboot
++TRACE_SYSCALL_TABLE(sys_reboot, sys_reboot, 88, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_old_readdir
++TRACE_SYSCALL_TABLE(sys_old_readdir, sys_old_readdir, 89, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_old_mmap
++TRACE_SYSCALL_TABLE(sys_old_mmap, sys_old_mmap, 90, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_truncate
++TRACE_SYSCALL_TABLE(sys_truncate, sys_truncate, 92, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_statfs
++TRACE_SYSCALL_TABLE(sys_statfs, sys_statfs, 99, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstatfs
++TRACE_SYSCALL_TABLE(sys_fstatfs, sys_fstatfs, 100, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_socketcall
++TRACE_SYSCALL_TABLE(sys_socketcall, sys_socketcall, 102, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_syslog
++TRACE_SYSCALL_TABLE(sys_syslog, sys_syslog, 103, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setitimer
++TRACE_SYSCALL_TABLE(sys_setitimer, sys_setitimer, 104, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getitimer
++TRACE_SYSCALL_TABLE(sys_getitimer, sys_getitimer, 105, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newstat
++TRACE_SYSCALL_TABLE(sys_newstat, sys_newstat, 106, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newlstat
++TRACE_SYSCALL_TABLE(sys_newlstat, sys_newlstat, 107, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newfstat
++TRACE_SYSCALL_TABLE(sys_newfstat, sys_newfstat, 108, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_uname
++TRACE_SYSCALL_TABLE(sys_uname, sys_uname, 109, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_wait4
++TRACE_SYSCALL_TABLE(sys_wait4, sys_wait4, 114, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_swapoff
++TRACE_SYSCALL_TABLE(sys_swapoff, sys_swapoff, 115, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysinfo
++TRACE_SYSCALL_TABLE(sys_sysinfo, sys_sysinfo, 116, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ipc
++TRACE_SYSCALL_TABLE(sys_ipc, sys_ipc, 117, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setdomainname
++TRACE_SYSCALL_TABLE(sys_setdomainname, sys_setdomainname, 121, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_newuname
++TRACE_SYSCALL_TABLE(sys_newuname, sys_newuname, 122, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_adjtimex
++TRACE_SYSCALL_TABLE(sys_adjtimex, sys_adjtimex, 124, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sigprocmask
++TRACE_SYSCALL_TABLE(sys_sigprocmask, sys_sigprocmask, 126, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_init_module
++TRACE_SYSCALL_TABLE(sys_init_module, sys_init_module, 128, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_delete_module
++TRACE_SYSCALL_TABLE(sys_delete_module, sys_delete_module, 129, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_quotactl
++TRACE_SYSCALL_TABLE(sys_quotactl, sys_quotactl, 131, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_llseek
++TRACE_SYSCALL_TABLE(sys_llseek, sys_llseek, 140, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getdents
++TRACE_SYSCALL_TABLE(sys_getdents, sys_getdents, 141, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_select
++TRACE_SYSCALL_TABLE(sys_select, sys_select, 142, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readv
++TRACE_SYSCALL_TABLE(sys_readv, sys_readv, 145, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_writev
++TRACE_SYSCALL_TABLE(sys_writev, sys_writev, 146, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sysctl
++TRACE_SYSCALL_TABLE(sys_sysctl, sys_sysctl, 149, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setparam
++TRACE_SYSCALL_TABLE(sys_sched_setparam, sys_sched_setparam, 154, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getparam
++TRACE_SYSCALL_TABLE(sys_sched_getparam, sys_sched_getparam, 155, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setscheduler
++TRACE_SYSCALL_TABLE(sys_sched_setscheduler, sys_sched_setscheduler, 156, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_rr_get_interval
++TRACE_SYSCALL_TABLE(sys_sched_rr_get_interval, sys_sched_rr_get_interval, 161, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_nanosleep
++TRACE_SYSCALL_TABLE(sys_nanosleep, sys_nanosleep, 162, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getresuid16
++TRACE_SYSCALL_TABLE(sys_getresuid16, sys_getresuid16, 165, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_poll
++TRACE_SYSCALL_TABLE(sys_poll, sys_poll, 168, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getresgid16
++TRACE_SYSCALL_TABLE(sys_getresgid16, sys_getresgid16, 171, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigaction
++TRACE_SYSCALL_TABLE(sys_rt_sigaction, sys_rt_sigaction, 174, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigprocmask
++TRACE_SYSCALL_TABLE(sys_rt_sigprocmask, sys_rt_sigprocmask, 175, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigpending
++TRACE_SYSCALL_TABLE(sys_rt_sigpending, sys_rt_sigpending, 176, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigtimedwait
++TRACE_SYSCALL_TABLE(sys_rt_sigtimedwait, sys_rt_sigtimedwait, 177, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigqueueinfo
++TRACE_SYSCALL_TABLE(sys_rt_sigqueueinfo, sys_rt_sigqueueinfo, 178, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_sigsuspend
++TRACE_SYSCALL_TABLE(sys_rt_sigsuspend, sys_rt_sigsuspend, 179, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chown16
++TRACE_SYSCALL_TABLE(sys_chown16, sys_chown16, 182, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getcwd
++TRACE_SYSCALL_TABLE(sys_getcwd, sys_getcwd, 183, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendfile
++TRACE_SYSCALL_TABLE(sys_sendfile, sys_sendfile, 187, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getrlimit
++TRACE_SYSCALL_TABLE(sys_getrlimit, sys_getrlimit, 191, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_stat64
++TRACE_SYSCALL_TABLE(sys_stat64, sys_stat64, 195, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lstat64
++TRACE_SYSCALL_TABLE(sys_lstat64, sys_lstat64, 196, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstat64
++TRACE_SYSCALL_TABLE(sys_fstat64, sys_fstat64, 197, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lchown
++TRACE_SYSCALL_TABLE(sys_lchown, sys_lchown, 198, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getgroups
++TRACE_SYSCALL_TABLE(sys_getgroups, sys_getgroups, 205, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setgroups
++TRACE_SYSCALL_TABLE(sys_setgroups, sys_setgroups, 206, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getresuid
++TRACE_SYSCALL_TABLE(sys_getresuid, sys_getresuid, 209, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getresgid
++TRACE_SYSCALL_TABLE(sys_getresgid, sys_getresgid, 211, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_chown
++TRACE_SYSCALL_TABLE(sys_chown, sys_chown, 212, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pivot_root
++TRACE_SYSCALL_TABLE(sys_pivot_root, sys_pivot_root, 217, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mincore
++TRACE_SYSCALL_TABLE(sys_mincore, sys_mincore, 218, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getdents64
++TRACE_SYSCALL_TABLE(sys_getdents64, sys_getdents64, 220, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_setxattr
++TRACE_SYSCALL_TABLE(sys_setxattr, sys_setxattr, 226, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lsetxattr
++TRACE_SYSCALL_TABLE(sys_lsetxattr, sys_lsetxattr, 227, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fsetxattr
++TRACE_SYSCALL_TABLE(sys_fsetxattr, sys_fsetxattr, 228, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getxattr
++TRACE_SYSCALL_TABLE(sys_getxattr, sys_getxattr, 229, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lgetxattr
++TRACE_SYSCALL_TABLE(sys_lgetxattr, sys_lgetxattr, 230, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fgetxattr
++TRACE_SYSCALL_TABLE(sys_fgetxattr, sys_fgetxattr, 231, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_listxattr
++TRACE_SYSCALL_TABLE(sys_listxattr, sys_listxattr, 232, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_llistxattr
++TRACE_SYSCALL_TABLE(sys_llistxattr, sys_llistxattr, 233, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_flistxattr
++TRACE_SYSCALL_TABLE(sys_flistxattr, sys_flistxattr, 234, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_removexattr
++TRACE_SYSCALL_TABLE(sys_removexattr, sys_removexattr, 235, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_lremovexattr
++TRACE_SYSCALL_TABLE(sys_lremovexattr, sys_lremovexattr, 236, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fremovexattr
++TRACE_SYSCALL_TABLE(sys_fremovexattr, sys_fremovexattr, 237, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendfile64
++TRACE_SYSCALL_TABLE(sys_sendfile64, sys_sendfile64, 239, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_futex
++TRACE_SYSCALL_TABLE(sys_futex, sys_futex, 240, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_setaffinity
++TRACE_SYSCALL_TABLE(sys_sched_setaffinity, sys_sched_setaffinity, 241, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sched_getaffinity
++TRACE_SYSCALL_TABLE(sys_sched_getaffinity, sys_sched_getaffinity, 242, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_setup
++TRACE_SYSCALL_TABLE(sys_io_setup, sys_io_setup, 245, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_getevents
++TRACE_SYSCALL_TABLE(sys_io_getevents, sys_io_getevents, 247, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_submit
++TRACE_SYSCALL_TABLE(sys_io_submit, sys_io_submit, 248, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_io_cancel
++TRACE_SYSCALL_TABLE(sys_io_cancel, sys_io_cancel, 249, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_ctl
++TRACE_SYSCALL_TABLE(sys_epoll_ctl, sys_epoll_ctl, 255, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_wait
++TRACE_SYSCALL_TABLE(sys_epoll_wait, sys_epoll_wait, 256, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_set_tid_address
++TRACE_SYSCALL_TABLE(sys_set_tid_address, sys_set_tid_address, 258, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_create
++TRACE_SYSCALL_TABLE(sys_timer_create, sys_timer_create, 259, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_settime
++TRACE_SYSCALL_TABLE(sys_timer_settime, sys_timer_settime, 260, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timer_gettime
++TRACE_SYSCALL_TABLE(sys_timer_gettime, sys_timer_gettime, 261, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_settime
++TRACE_SYSCALL_TABLE(sys_clock_settime, sys_clock_settime, 264, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_gettime
++TRACE_SYSCALL_TABLE(sys_clock_gettime, sys_clock_gettime, 265, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_getres
++TRACE_SYSCALL_TABLE(sys_clock_getres, sys_clock_getres, 266, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_nanosleep
++TRACE_SYSCALL_TABLE(sys_clock_nanosleep, sys_clock_nanosleep, 267, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_statfs64
++TRACE_SYSCALL_TABLE(sys_statfs64, sys_statfs64, 268, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstatfs64
++TRACE_SYSCALL_TABLE(sys_fstatfs64, sys_fstatfs64, 269, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_utimes
++TRACE_SYSCALL_TABLE(sys_utimes, sys_utimes, 271, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_open
++TRACE_SYSCALL_TABLE(sys_mq_open, sys_mq_open, 277, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_unlink
++TRACE_SYSCALL_TABLE(sys_mq_unlink, sys_mq_unlink, 278, 1)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_timedsend
++TRACE_SYSCALL_TABLE(sys_mq_timedsend, sys_mq_timedsend, 279, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_timedreceive
++TRACE_SYSCALL_TABLE(sys_mq_timedreceive, sys_mq_timedreceive, 280, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_notify
++TRACE_SYSCALL_TABLE(sys_mq_notify, sys_mq_notify, 281, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mq_getsetattr
++TRACE_SYSCALL_TABLE(sys_mq_getsetattr, sys_mq_getsetattr, 282, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_kexec_load
++TRACE_SYSCALL_TABLE(sys_kexec_load, sys_kexec_load, 283, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_waitid
++TRACE_SYSCALL_TABLE(sys_waitid, sys_waitid, 284, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_add_key
++TRACE_SYSCALL_TABLE(sys_add_key, sys_add_key, 286, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_request_key
++TRACE_SYSCALL_TABLE(sys_request_key, sys_request_key, 287, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_inotify_add_watch
++TRACE_SYSCALL_TABLE(sys_inotify_add_watch, sys_inotify_add_watch, 292, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_openat
++TRACE_SYSCALL_TABLE(sys_openat, sys_openat, 295, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mkdirat
++TRACE_SYSCALL_TABLE(sys_mkdirat, sys_mkdirat, 296, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_mknodat
++TRACE_SYSCALL_TABLE(sys_mknodat, sys_mknodat, 297, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchownat
++TRACE_SYSCALL_TABLE(sys_fchownat, sys_fchownat, 298, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_futimesat
++TRACE_SYSCALL_TABLE(sys_futimesat, sys_futimesat, 299, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fstatat64
++TRACE_SYSCALL_TABLE(sys_fstatat64, sys_fstatat64, 300, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_unlinkat
++TRACE_SYSCALL_TABLE(sys_unlinkat, sys_unlinkat, 301, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_renameat
++TRACE_SYSCALL_TABLE(sys_renameat, sys_renameat, 302, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_linkat
++TRACE_SYSCALL_TABLE(sys_linkat, sys_linkat, 303, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_symlinkat
++TRACE_SYSCALL_TABLE(sys_symlinkat, sys_symlinkat, 304, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_readlinkat
++TRACE_SYSCALL_TABLE(sys_readlinkat, sys_readlinkat, 305, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_fchmodat
++TRACE_SYSCALL_TABLE(sys_fchmodat, sys_fchmodat, 306, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_faccessat
++TRACE_SYSCALL_TABLE(sys_faccessat, sys_faccessat, 307, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pselect6
++TRACE_SYSCALL_TABLE(sys_pselect6, sys_pselect6, 308, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_ppoll
++TRACE_SYSCALL_TABLE(sys_ppoll, sys_ppoll, 309, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_set_robust_list
++TRACE_SYSCALL_TABLE(sys_set_robust_list, sys_set_robust_list, 311, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_get_robust_list
++TRACE_SYSCALL_TABLE(sys_get_robust_list, sys_get_robust_list, 312, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_splice
++TRACE_SYSCALL_TABLE(sys_splice, sys_splice, 313, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_vmsplice
++TRACE_SYSCALL_TABLE(sys_vmsplice, sys_vmsplice, 316, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_getcpu
++TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 318, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_epoll_pwait
++TRACE_SYSCALL_TABLE(sys_epoll_pwait, sys_epoll_pwait, 319, 6)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_utimensat
++TRACE_SYSCALL_TABLE(sys_utimensat, sys_utimensat, 320, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_signalfd
++TRACE_SYSCALL_TABLE(sys_signalfd, sys_signalfd, 321, 3)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timerfd_settime
++TRACE_SYSCALL_TABLE(sys_timerfd_settime, sys_timerfd_settime, 325, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_timerfd_gettime
++TRACE_SYSCALL_TABLE(sys_timerfd_gettime, sys_timerfd_gettime, 326, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_signalfd4
++TRACE_SYSCALL_TABLE(sys_signalfd4, sys_signalfd4, 327, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pipe2
++TRACE_SYSCALL_TABLE(sys_pipe2, sys_pipe2, 331, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_preadv
++TRACE_SYSCALL_TABLE(sys_preadv, sys_preadv, 333, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_pwritev
++TRACE_SYSCALL_TABLE(sys_pwritev, sys_pwritev, 334, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_rt_tgsigqueueinfo
++TRACE_SYSCALL_TABLE(sys_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, 335, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_perf_event_open
++TRACE_SYSCALL_TABLE(sys_perf_event_open, sys_perf_event_open, 336, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_recvmmsg
++TRACE_SYSCALL_TABLE(sys_recvmmsg, sys_recvmmsg, 337, 5)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_prlimit64
++TRACE_SYSCALL_TABLE(sys_prlimit64, sys_prlimit64, 340, 4)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_clock_adjtime
++TRACE_SYSCALL_TABLE(sys_clock_adjtime, sys_clock_adjtime, 343, 2)
++#endif
++#ifndef OVERRIDE_TABLE_32_sys_sendmmsg
++TRACE_SYSCALL_TABLE(sys_sendmmsg, sys_sendmmsg, 345, 4)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h
+@@ -0,0 +1,33 @@
++
++#ifndef CREATE_SYSCALL_TABLE
++
++# ifndef CONFIG_UID16
++# define OVERRIDE_32_sys_getgroups16
++# define OVERRIDE_32_sys_setgroups16
++# define OVERRIDE_32_sys_lchown16
++# define OVERRIDE_32_sys_getresuid16
++# define OVERRIDE_32_sys_getresgid16
++# define OVERRIDE_32_sys_chown16
++# endif
++
++#else /* CREATE_SYSCALL_TABLE */
++
++# ifndef CONFIG_UID16
++# define OVERRIDE_TABLE_32_sys_getgroups16
++# define OVERRIDE_TABLE_32_sys_setgroups16
++# define OVERRIDE_TABLE_32_sys_lchown16
++# define OVERRIDE_TABLE_32_sys_getresuid16
++# define OVERRIDE_TABLE_32_sys_getresgid16
++# define OVERRIDE_TABLE_32_sys_chown16
++# endif
++
++#define OVERRIDE_TABLE_32_sys_execve
++TRACE_SYSCALL_TABLE(sys_execve, sys_execve, 11, 3)
++#define OVERRIDE_TABLE_32_sys_clone
++TRACE_SYSCALL_TABLE(sys_clone, sys_clone, 120, 5)
++#define OVERRIDE_TABLE_32_sys_getcpu
++TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 318, 3)
++
++#endif /* CREATE_SYSCALL_TABLE */
++
++
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.10.0-rc7_integers.h
+@@ -0,0 +1,1097 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_INTEGERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_INTEGERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "x86-64-syscalls-3.10.0-rc7_integers_override.h"
++#include "syscalls_integers_override.h"
++
++SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,
++ TP_STRUCT__entry(),
++ TP_fast_assign(),
++ TP_printk()
++)
++#ifndef OVERRIDE_64_sys_sched_yield
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sched_yield)
++#endif
++#ifndef OVERRIDE_64_sys_pause
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_pause)
++#endif
++#ifndef OVERRIDE_64_sys_getpid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpid)
++#endif
++#ifndef OVERRIDE_64_sys_getuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getuid)
++#endif
++#ifndef OVERRIDE_64_sys_getgid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getgid)
++#endif
++#ifndef OVERRIDE_64_sys_geteuid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_geteuid)
++#endif
++#ifndef OVERRIDE_64_sys_getegid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getegid)
++#endif
++#ifndef OVERRIDE_64_sys_getppid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getppid)
++#endif
++#ifndef OVERRIDE_64_sys_getpgrp
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_getpgrp)
++#endif
++#ifndef OVERRIDE_64_sys_setsid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_setsid)
++#endif
++#ifndef OVERRIDE_64_sys_munlockall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_munlockall)
++#endif
++#ifndef OVERRIDE_64_sys_vhangup
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_vhangup)
++#endif
++#ifndef OVERRIDE_64_sys_sync
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_sync)
++#endif
++#ifndef OVERRIDE_64_sys_gettid
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_gettid)
++#endif
++#ifndef OVERRIDE_64_sys_restart_syscall
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_restart_syscall)
++#endif
++#ifndef OVERRIDE_64_sys_inotify_init
++SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_inotify_init)
++#endif
++#ifndef OVERRIDE_64_sys_close
++SC_TRACE_EVENT(sys_close,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_brk
++SC_TRACE_EVENT(sys_brk,
++ TP_PROTO(unsigned long brk),
++ TP_ARGS(brk),
++ TP_STRUCT__entry(__field(unsigned long, brk)),
++ TP_fast_assign(tp_assign(brk, brk)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_dup
++SC_TRACE_EVENT(sys_dup,
++ TP_PROTO(unsigned int fildes),
++ TP_ARGS(fildes),
++ TP_STRUCT__entry(__field(unsigned int, fildes)),
++ TP_fast_assign(tp_assign(fildes, fildes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_alarm
++SC_TRACE_EVENT(sys_alarm,
++ TP_PROTO(unsigned int seconds),
++ TP_ARGS(seconds),
++ TP_STRUCT__entry(__field(unsigned int, seconds)),
++ TP_fast_assign(tp_assign(seconds, seconds)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_exit
++SC_TRACE_EVENT(sys_exit,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fsync
++SC_TRACE_EVENT(sys_fsync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fdatasync
++SC_TRACE_EVENT(sys_fdatasync,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fchdir
++SC_TRACE_EVENT(sys_fchdir,
++ TP_PROTO(unsigned int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(unsigned int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_umask
++SC_TRACE_EVENT(sys_umask,
++ TP_PROTO(int mask),
++ TP_ARGS(mask),
++ TP_STRUCT__entry(__field(int, mask)),
++ TP_fast_assign(tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setuid
++SC_TRACE_EVENT(sys_setuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setgid
++SC_TRACE_EVENT(sys_setgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getpgid
++SC_TRACE_EVENT(sys_getpgid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setfsuid
++SC_TRACE_EVENT(sys_setfsuid,
++ TP_PROTO(uid_t uid),
++ TP_ARGS(uid),
++ TP_STRUCT__entry(__field(uid_t, uid)),
++ TP_fast_assign(tp_assign(uid, uid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setfsgid
++SC_TRACE_EVENT(sys_setfsgid,
++ TP_PROTO(gid_t gid),
++ TP_ARGS(gid),
++ TP_STRUCT__entry(__field(gid_t, gid)),
++ TP_fast_assign(tp_assign(gid, gid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getsid
++SC_TRACE_EVENT(sys_getsid,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_personality
++SC_TRACE_EVENT(sys_personality,
++ TP_PROTO(unsigned int personality),
++ TP_ARGS(personality),
++ TP_STRUCT__entry(__field(unsigned int, personality)),
++ TP_fast_assign(tp_assign(personality, personality)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_getscheduler
++SC_TRACE_EVENT(sys_sched_getscheduler,
++ TP_PROTO(pid_t pid),
++ TP_ARGS(pid),
++ TP_STRUCT__entry(__field(pid_t, pid)),
++ TP_fast_assign(tp_assign(pid, pid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_get_priority_max
++SC_TRACE_EVENT(sys_sched_get_priority_max,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_get_priority_min
++SC_TRACE_EVENT(sys_sched_get_priority_min,
++ TP_PROTO(int policy),
++ TP_ARGS(policy),
++ TP_STRUCT__entry(__field(int, policy)),
++ TP_fast_assign(tp_assign(policy, policy)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mlockall
++SC_TRACE_EVENT(sys_mlockall,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_io_destroy
++SC_TRACE_EVENT(sys_io_destroy,
++ TP_PROTO(aio_context_t ctx),
++ TP_ARGS(ctx),
++ TP_STRUCT__entry(__field(aio_context_t, ctx)),
++ TP_fast_assign(tp_assign(ctx, ctx)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_epoll_create
++SC_TRACE_EVENT(sys_epoll_create,
++ TP_PROTO(int size),
++ TP_ARGS(size),
++ TP_STRUCT__entry(__field(int, size)),
++ TP_fast_assign(tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timer_getoverrun
++SC_TRACE_EVENT(sys_timer_getoverrun,
++ TP_PROTO(timer_t timer_id),
++ TP_ARGS(timer_id),
++ TP_STRUCT__entry(__field(timer_t, timer_id)),
++ TP_fast_assign(tp_assign(timer_id, timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timer_delete
++SC_TRACE_EVENT(sys_timer_delete,
++ TP_PROTO(timer_t timer_id),
++ TP_ARGS(timer_id),
++ TP_STRUCT__entry(__field(timer_t, timer_id)),
++ TP_fast_assign(tp_assign(timer_id, timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_exit_group
++SC_TRACE_EVENT(sys_exit_group,
++ TP_PROTO(int error_code),
++ TP_ARGS(error_code),
++ TP_STRUCT__entry(__field(int, error_code)),
++ TP_fast_assign(tp_assign(error_code, error_code)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_unshare
++SC_TRACE_EVENT(sys_unshare,
++ TP_PROTO(unsigned long unshare_flags),
++ TP_ARGS(unshare_flags),
++ TP_STRUCT__entry(__field(unsigned long, unshare_flags)),
++ TP_fast_assign(tp_assign(unshare_flags, unshare_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_eventfd
++SC_TRACE_EVENT(sys_eventfd,
++ TP_PROTO(unsigned int count),
++ TP_ARGS(count),
++ TP_STRUCT__entry(__field(unsigned int, count)),
++ TP_fast_assign(tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_epoll_create1
++SC_TRACE_EVENT(sys_epoll_create1,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_inotify_init1
++SC_TRACE_EVENT(sys_inotify_init1,
++ TP_PROTO(int flags),
++ TP_ARGS(flags),
++ TP_STRUCT__entry(__field(int, flags)),
++ TP_fast_assign(tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_syncfs
++SC_TRACE_EVENT(sys_syncfs,
++ TP_PROTO(int fd),
++ TP_ARGS(fd),
++ TP_STRUCT__entry(__field(int, fd)),
++ TP_fast_assign(tp_assign(fd, fd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_munmap
++SC_TRACE_EVENT(sys_munmap,
++ TP_PROTO(unsigned long addr, size_t len),
++ TP_ARGS(addr, len),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_dup2
++SC_TRACE_EVENT(sys_dup2,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd),
++ TP_ARGS(oldfd, newfd),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_shutdown
++SC_TRACE_EVENT(sys_shutdown,
++ TP_PROTO(int fd, int how),
++ TP_ARGS(fd, how),
++ TP_STRUCT__entry(__field(int, fd) __field(int, how)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(how, how)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_listen
++SC_TRACE_EVENT(sys_listen,
++ TP_PROTO(int fd, int backlog),
++ TP_ARGS(fd, backlog),
++ TP_STRUCT__entry(__field(int, fd) __field(int, backlog)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(backlog, backlog)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_kill
++SC_TRACE_EVENT(sys_kill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_msgget
++SC_TRACE_EVENT(sys_msgget,
++ TP_PROTO(key_t key, int msgflg),
++ TP_ARGS(key, msgflg),
++ TP_STRUCT__entry(__field(key_t, key) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(key, key) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_flock
++SC_TRACE_EVENT(sys_flock,
++ TP_PROTO(unsigned int fd, unsigned int cmd),
++ TP_ARGS(fd, cmd),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ftruncate
++SC_TRACE_EVENT(sys_ftruncate,
++ TP_PROTO(unsigned int fd, unsigned long length),
++ TP_ARGS(fd, length),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned long, length)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fchmod
++SC_TRACE_EVENT(sys_fchmod,
++ TP_PROTO(unsigned int fd, umode_t mode),
++ TP_ARGS(fd, mode),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setpgid
++SC_TRACE_EVENT(sys_setpgid,
++ TP_PROTO(pid_t pid, pid_t pgid),
++ TP_ARGS(pid, pgid),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, pgid)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(pgid, pgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setreuid
++SC_TRACE_EVENT(sys_setreuid,
++ TP_PROTO(uid_t ruid, uid_t euid),
++ TP_ARGS(ruid, euid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setregid
++SC_TRACE_EVENT(sys_setregid,
++ TP_PROTO(gid_t rgid, gid_t egid),
++ TP_ARGS(rgid, egid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getpriority
++SC_TRACE_EVENT(sys_getpriority,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mlock
++SC_TRACE_EVENT(sys_mlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_munlock
++SC_TRACE_EVENT(sys_munlock,
++ TP_PROTO(unsigned long start, size_t len),
++ TP_ARGS(start, len),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_tkill
++SC_TRACE_EVENT(sys_tkill,
++ TP_PROTO(pid_t pid, int sig),
++ TP_ARGS(pid, sig),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ioprio_get
++SC_TRACE_EVENT(sys_ioprio_get,
++ TP_PROTO(int which, int who),
++ TP_ARGS(which, who),
++ TP_STRUCT__entry(__field(int, which) __field(int, who)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_inotify_rm_watch
++SC_TRACE_EVENT(sys_inotify_rm_watch,
++ TP_PROTO(int fd, __s32 wd),
++ TP_ARGS(fd, wd),
++ TP_STRUCT__entry(__field(int, fd) __field(__s32, wd)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(wd, wd)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timerfd_create
++SC_TRACE_EVENT(sys_timerfd_create,
++ TP_PROTO(int clockid, int flags),
++ TP_ARGS(clockid, flags),
++ TP_STRUCT__entry(__field(int, clockid) __field(int, flags)),
++ TP_fast_assign(tp_assign(clockid, clockid) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_eventfd2
++SC_TRACE_EVENT(sys_eventfd2,
++ TP_PROTO(unsigned int count, int flags),
++ TP_ARGS(count, flags),
++ TP_STRUCT__entry(__field(unsigned int, count) __field(int, flags)),
++ TP_fast_assign(tp_assign(count, count) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fanotify_init
++SC_TRACE_EVENT(sys_fanotify_init,
++ TP_PROTO(unsigned int flags, unsigned int event_f_flags),
++ TP_ARGS(flags, event_f_flags),
++ TP_STRUCT__entry(__field(unsigned int, flags) __field(unsigned int, event_f_flags)),
++ TP_fast_assign(tp_assign(flags, flags) tp_assign(event_f_flags, event_f_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setns
++SC_TRACE_EVENT(sys_setns,
++ TP_PROTO(int fd, int nstype),
++ TP_ARGS(fd, nstype),
++ TP_STRUCT__entry(__field(int, fd) __field(int, nstype)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(nstype, nstype)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lseek
++SC_TRACE_EVENT(sys_lseek,
++ TP_PROTO(unsigned int fd, off_t offset, unsigned int whence),
++ TP_ARGS(fd, offset, whence),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(off_t, offset) __field(unsigned int, whence)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(whence, whence)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mprotect
++SC_TRACE_EVENT(sys_mprotect,
++ TP_PROTO(unsigned long start, size_t len, unsigned long prot),
++ TP_ARGS(start, len, prot),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(unsigned long, prot)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(prot, prot)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ioctl
++SC_TRACE_EVENT(sys_ioctl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_msync
++SC_TRACE_EVENT(sys_msync,
++ TP_PROTO(unsigned long start, size_t len, int flags),
++ TP_ARGS(start, len, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field(int, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_madvise
++SC_TRACE_EVENT(sys_madvise,
++ TP_PROTO(unsigned long start, size_t len_in, int behavior),
++ TP_ARGS(start, len_in, behavior),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len_in) __field(int, behavior)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len_in, len_in) tp_assign(behavior, behavior)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_shmget
++SC_TRACE_EVENT(sys_shmget,
++ TP_PROTO(key_t key, size_t size, int shmflg),
++ TP_ARGS(key, size, shmflg),
++ TP_STRUCT__entry(__field(key_t, key) __field(size_t, size) __field(int, shmflg)),
++ TP_fast_assign(tp_assign(key, key) tp_assign(size, size) tp_assign(shmflg, shmflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_socket
++SC_TRACE_EVENT(sys_socket,
++ TP_PROTO(int family, int type, int protocol),
++ TP_ARGS(family, type, protocol),
++ TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol)),
++ TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_semget
++SC_TRACE_EVENT(sys_semget,
++ TP_PROTO(key_t key, int nsems, int semflg),
++ TP_ARGS(key, nsems, semflg),
++ TP_STRUCT__entry(__field(key_t, key) __field(int, nsems) __field(int, semflg)),
++ TP_fast_assign(tp_assign(key, key) tp_assign(nsems, nsems) tp_assign(semflg, semflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fcntl
++SC_TRACE_EVENT(sys_fcntl,
++ TP_PROTO(unsigned int fd, unsigned int cmd, unsigned long arg),
++ TP_ARGS(fd, cmd, arg),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(unsigned int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fchown
++SC_TRACE_EVENT(sys_fchown,
++ TP_PROTO(unsigned int fd, uid_t user, gid_t group),
++ TP_ARGS(fd, user, group),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setresuid
++SC_TRACE_EVENT(sys_setresuid,
++ TP_PROTO(uid_t ruid, uid_t euid, uid_t suid),
++ TP_ARGS(ruid, euid, suid),
++ TP_STRUCT__entry(__field(uid_t, ruid) __field(uid_t, euid) __field(uid_t, suid)),
++ TP_fast_assign(tp_assign(ruid, ruid) tp_assign(euid, euid) tp_assign(suid, suid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setresgid
++SC_TRACE_EVENT(sys_setresgid,
++ TP_PROTO(gid_t rgid, gid_t egid, gid_t sgid),
++ TP_ARGS(rgid, egid, sgid),
++ TP_STRUCT__entry(__field(gid_t, rgid) __field(gid_t, egid) __field(gid_t, sgid)),
++ TP_fast_assign(tp_assign(rgid, rgid) tp_assign(egid, egid) tp_assign(sgid, sgid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sysfs
++SC_TRACE_EVENT(sys_sysfs,
++ TP_PROTO(int option, unsigned long arg1, unsigned long arg2),
++ TP_ARGS(option, arg1, arg2),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg1) __field(unsigned long, arg2)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg1, arg1) tp_assign(arg2, arg2)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setpriority
++SC_TRACE_EVENT(sys_setpriority,
++ TP_PROTO(int which, int who, int niceval),
++ TP_ARGS(which, who, niceval),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, niceval)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(niceval, niceval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_readahead
++SC_TRACE_EVENT(sys_readahead,
++ TP_PROTO(int fd, loff_t offset, size_t count),
++ TP_ARGS(fd, offset, count),
++ TP_STRUCT__entry(__field(int, fd) __field(loff_t, offset) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_tgkill
++SC_TRACE_EVENT(sys_tgkill,
++ TP_PROTO(pid_t tgid, pid_t pid, int sig),
++ TP_ARGS(tgid, pid, sig),
++ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig)),
++ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ioprio_set
++SC_TRACE_EVENT(sys_ioprio_set,
++ TP_PROTO(int which, int who, int ioprio),
++ TP_ARGS(which, who, ioprio),
++ TP_STRUCT__entry(__field(int, which) __field(int, who) __field(int, ioprio)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(who, who) tp_assign(ioprio, ioprio)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_dup3
++SC_TRACE_EVENT(sys_dup3,
++ TP_PROTO(unsigned int oldfd, unsigned int newfd, int flags),
++ TP_ARGS(oldfd, newfd, flags),
++ TP_STRUCT__entry(__field(unsigned int, oldfd) __field(unsigned int, newfd) __field(int, flags)),
++ TP_fast_assign(tp_assign(oldfd, oldfd) tp_assign(newfd, newfd) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_semctl
++SC_TRACE_EVENT(sys_semctl,
++ TP_PROTO(int semid, int semnum, int cmd, unsigned long arg),
++ TP_ARGS(semid, semnum, cmd, arg),
++ TP_STRUCT__entry(__field(int, semid) __field(int, semnum) __field(int, cmd) __field(unsigned long, arg)),
++ TP_fast_assign(tp_assign(semid, semid) tp_assign(semnum, semnum) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ptrace
++SC_TRACE_EVENT(sys_ptrace,
++ TP_PROTO(long request, long pid, unsigned long addr, unsigned long data),
++ TP_ARGS(request, pid, addr, data),
++ TP_STRUCT__entry(__field(long, request) __field(long, pid) __field_hex(unsigned long, addr) __field(unsigned long, data)),
++ TP_fast_assign(tp_assign(request, request) tp_assign(pid, pid) tp_assign(addr, addr) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fadvise64
++SC_TRACE_EVENT(sys_fadvise64,
++ TP_PROTO(int fd, loff_t offset, size_t len, int advice),
++ TP_ARGS(fd, offset, len, advice),
++ TP_STRUCT__entry(__field(int, fd) __field(loff_t, offset) __field(size_t, len) __field(int, advice)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(len, len) tp_assign(advice, advice)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_tee
++SC_TRACE_EVENT(sys_tee,
++ TP_PROTO(int fdin, int fdout, size_t len, unsigned int flags),
++ TP_ARGS(fdin, fdout, len, flags),
++ TP_STRUCT__entry(__field(int, fdin) __field(int, fdout) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fdin, fdin) tp_assign(fdout, fdout) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sync_file_range
++SC_TRACE_EVENT(sys_sync_file_range,
++ TP_PROTO(int fd, loff_t offset, loff_t nbytes, unsigned int flags),
++ TP_ARGS(fd, offset, nbytes, flags),
++ TP_STRUCT__entry(__field(int, fd) __field(loff_t, offset) __field(loff_t, nbytes) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(offset, offset) tp_assign(nbytes, nbytes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fallocate
++SC_TRACE_EVENT(sys_fallocate,
++ TP_PROTO(int fd, int mode, loff_t offset, loff_t len),
++ TP_ARGS(fd, mode, offset, len),
++ TP_STRUCT__entry(__field(int, fd) __field(int, mode) __field(loff_t, offset) __field(loff_t, len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mode, mode) tp_assign(offset, offset) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mremap
++SC_TRACE_EVENT(sys_mremap,
++ TP_PROTO(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr),
++ TP_ARGS(addr, old_len, new_len, flags, new_addr),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, old_len) __field(unsigned long, new_len) __field(unsigned long, flags) __field_hex(unsigned long, new_addr)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(old_len, old_len) tp_assign(new_len, new_len) tp_assign(flags, flags) tp_assign(new_addr, new_addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_prctl
++SC_TRACE_EVENT(sys_prctl,
++ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
++ TP_ARGS(option, arg2, arg3, arg4, arg5),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_remap_file_pages
++SC_TRACE_EVENT(sys_remap_file_pages,
++ TP_PROTO(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags),
++ TP_ARGS(start, size, prot, pgoff, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(unsigned long, size) __field(unsigned long, prot) __field(unsigned long, pgoff) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(size, size) tp_assign(prot, prot) tp_assign(pgoff, pgoff) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_keyctl
++SC_TRACE_EVENT(sys_keyctl,
++ TP_PROTO(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5),
++ TP_ARGS(option, arg2, arg3, arg4, arg5),
++ TP_STRUCT__entry(__field(int, option) __field(unsigned long, arg2) __field(unsigned long, arg3) __field(unsigned long, arg4) __field(unsigned long, arg5)),
++ TP_fast_assign(tp_assign(option, option) tp_assign(arg2, arg2) tp_assign(arg3, arg3) tp_assign(arg4, arg4) tp_assign(arg5, arg5)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mmap
++SC_TRACE_EVENT(sys_mmap,
++ TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off),
++ TP_ARGS(addr, len, prot, flags, fd, off),
++ TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(unsigned long, len) __field(unsigned long, prot) __field(unsigned long, flags) __field(unsigned long, fd) __field(unsigned long, off)),
++ TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(off, off)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_INTEGERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "x86-64-syscalls-3.10.0-rc7_integers_override.h"
++#include "syscalls_integers_override.h"
++
++#ifndef OVERRIDE_TABLE_64_sys_sched_yield
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sched_yield, 24, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pause
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_pause, 34, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getpid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpid, 39, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getuid, 102, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getgid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getgid, 104, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_geteuid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_geteuid, 107, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getegid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getegid, 108, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getppid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getppid, 110, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getpgrp
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_getpgrp, 111, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setsid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_setsid, 112, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_munlockall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_munlockall, 152, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_vhangup
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_vhangup, 153, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sync
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_sync, 162, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_gettid
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_gettid, 186, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_restart_syscall
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_restart_syscall, 219, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_inotify_init
++TRACE_SYSCALL_TABLE(syscalls_noargs, sys_inotify_init, 253, 0)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_close
++TRACE_SYSCALL_TABLE(sys_close, sys_close, 3, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lseek
++TRACE_SYSCALL_TABLE(sys_lseek, sys_lseek, 8, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mmap
++TRACE_SYSCALL_TABLE(sys_mmap, sys_mmap, 9, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mprotect
++TRACE_SYSCALL_TABLE(sys_mprotect, sys_mprotect, 10, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_munmap
++TRACE_SYSCALL_TABLE(sys_munmap, sys_munmap, 11, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_brk
++TRACE_SYSCALL_TABLE(sys_brk, sys_brk, 12, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ioctl
++TRACE_SYSCALL_TABLE(sys_ioctl, sys_ioctl, 16, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mremap
++TRACE_SYSCALL_TABLE(sys_mremap, sys_mremap, 25, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_msync
++TRACE_SYSCALL_TABLE(sys_msync, sys_msync, 26, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_madvise
++TRACE_SYSCALL_TABLE(sys_madvise, sys_madvise, 28, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_shmget
++TRACE_SYSCALL_TABLE(sys_shmget, sys_shmget, 29, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_dup
++TRACE_SYSCALL_TABLE(sys_dup, sys_dup, 32, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_dup2
++TRACE_SYSCALL_TABLE(sys_dup2, sys_dup2, 33, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_alarm
++TRACE_SYSCALL_TABLE(sys_alarm, sys_alarm, 37, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_socket
++TRACE_SYSCALL_TABLE(sys_socket, sys_socket, 41, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_shutdown
++TRACE_SYSCALL_TABLE(sys_shutdown, sys_shutdown, 48, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_listen
++TRACE_SYSCALL_TABLE(sys_listen, sys_listen, 50, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_exit
++TRACE_SYSCALL_TABLE(sys_exit, sys_exit, 60, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_kill
++TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 62, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_semget
++TRACE_SYSCALL_TABLE(sys_semget, sys_semget, 64, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_semctl
++TRACE_SYSCALL_TABLE(sys_semctl, sys_semctl, 66, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_msgget
++TRACE_SYSCALL_TABLE(sys_msgget, sys_msgget, 68, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fcntl
++TRACE_SYSCALL_TABLE(sys_fcntl, sys_fcntl, 72, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_flock
++TRACE_SYSCALL_TABLE(sys_flock, sys_flock, 73, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fsync
++TRACE_SYSCALL_TABLE(sys_fsync, sys_fsync, 74, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fdatasync
++TRACE_SYSCALL_TABLE(sys_fdatasync, sys_fdatasync, 75, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ftruncate
++TRACE_SYSCALL_TABLE(sys_ftruncate, sys_ftruncate, 77, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fchdir
++TRACE_SYSCALL_TABLE(sys_fchdir, sys_fchdir, 81, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fchmod
++TRACE_SYSCALL_TABLE(sys_fchmod, sys_fchmod, 91, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fchown
++TRACE_SYSCALL_TABLE(sys_fchown, sys_fchown, 93, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_umask
++TRACE_SYSCALL_TABLE(sys_umask, sys_umask, 95, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ptrace
++TRACE_SYSCALL_TABLE(sys_ptrace, sys_ptrace, 101, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setuid
++TRACE_SYSCALL_TABLE(sys_setuid, sys_setuid, 105, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setgid
++TRACE_SYSCALL_TABLE(sys_setgid, sys_setgid, 106, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setpgid
++TRACE_SYSCALL_TABLE(sys_setpgid, sys_setpgid, 109, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setreuid
++TRACE_SYSCALL_TABLE(sys_setreuid, sys_setreuid, 113, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setregid
++TRACE_SYSCALL_TABLE(sys_setregid, sys_setregid, 114, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setresuid
++TRACE_SYSCALL_TABLE(sys_setresuid, sys_setresuid, 117, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setresgid
++TRACE_SYSCALL_TABLE(sys_setresgid, sys_setresgid, 119, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getpgid
++TRACE_SYSCALL_TABLE(sys_getpgid, sys_getpgid, 121, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setfsuid
++TRACE_SYSCALL_TABLE(sys_setfsuid, sys_setfsuid, 122, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setfsgid
++TRACE_SYSCALL_TABLE(sys_setfsgid, sys_setfsgid, 123, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getsid
++TRACE_SYSCALL_TABLE(sys_getsid, sys_getsid, 124, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_personality
++TRACE_SYSCALL_TABLE(sys_personality, sys_personality, 135, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sysfs
++TRACE_SYSCALL_TABLE(sys_sysfs, sys_sysfs, 139, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getpriority
++TRACE_SYSCALL_TABLE(sys_getpriority, sys_getpriority, 140, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setpriority
++TRACE_SYSCALL_TABLE(sys_setpriority, sys_setpriority, 141, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_getscheduler
++TRACE_SYSCALL_TABLE(sys_sched_getscheduler, sys_sched_getscheduler, 145, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_get_priority_max
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_max, sys_sched_get_priority_max, 146, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_get_priority_min
++TRACE_SYSCALL_TABLE(sys_sched_get_priority_min, sys_sched_get_priority_min, 147, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mlock
++TRACE_SYSCALL_TABLE(sys_mlock, sys_mlock, 149, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_munlock
++TRACE_SYSCALL_TABLE(sys_munlock, sys_munlock, 150, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mlockall
++TRACE_SYSCALL_TABLE(sys_mlockall, sys_mlockall, 151, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_prctl
++TRACE_SYSCALL_TABLE(sys_prctl, sys_prctl, 157, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_readahead
++TRACE_SYSCALL_TABLE(sys_readahead, sys_readahead, 187, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_tkill
++TRACE_SYSCALL_TABLE(sys_tkill, sys_tkill, 200, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_io_destroy
++TRACE_SYSCALL_TABLE(sys_io_destroy, sys_io_destroy, 207, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_epoll_create
++TRACE_SYSCALL_TABLE(sys_epoll_create, sys_epoll_create, 213, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_remap_file_pages
++TRACE_SYSCALL_TABLE(sys_remap_file_pages, sys_remap_file_pages, 216, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fadvise64
++TRACE_SYSCALL_TABLE(sys_fadvise64, sys_fadvise64, 221, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timer_getoverrun
++TRACE_SYSCALL_TABLE(sys_timer_getoverrun, sys_timer_getoverrun, 225, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timer_delete
++TRACE_SYSCALL_TABLE(sys_timer_delete, sys_timer_delete, 226, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_exit_group
++TRACE_SYSCALL_TABLE(sys_exit_group, sys_exit_group, 231, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_tgkill
++TRACE_SYSCALL_TABLE(sys_tgkill, sys_tgkill, 234, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_keyctl
++TRACE_SYSCALL_TABLE(sys_keyctl, sys_keyctl, 250, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ioprio_set
++TRACE_SYSCALL_TABLE(sys_ioprio_set, sys_ioprio_set, 251, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ioprio_get
++TRACE_SYSCALL_TABLE(sys_ioprio_get, sys_ioprio_get, 252, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_inotify_rm_watch
++TRACE_SYSCALL_TABLE(sys_inotify_rm_watch, sys_inotify_rm_watch, 255, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_unshare
++TRACE_SYSCALL_TABLE(sys_unshare, sys_unshare, 272, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_tee
++TRACE_SYSCALL_TABLE(sys_tee, sys_tee, 276, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sync_file_range
++TRACE_SYSCALL_TABLE(sys_sync_file_range, sys_sync_file_range, 277, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timerfd_create
++TRACE_SYSCALL_TABLE(sys_timerfd_create, sys_timerfd_create, 283, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_eventfd
++TRACE_SYSCALL_TABLE(sys_eventfd, sys_eventfd, 284, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fallocate
++TRACE_SYSCALL_TABLE(sys_fallocate, sys_fallocate, 285, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_eventfd2
++TRACE_SYSCALL_TABLE(sys_eventfd2, sys_eventfd2, 290, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_epoll_create1
++TRACE_SYSCALL_TABLE(sys_epoll_create1, sys_epoll_create1, 291, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_dup3
++TRACE_SYSCALL_TABLE(sys_dup3, sys_dup3, 292, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_inotify_init1
++TRACE_SYSCALL_TABLE(sys_inotify_init1, sys_inotify_init1, 294, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fanotify_init
++TRACE_SYSCALL_TABLE(sys_fanotify_init, sys_fanotify_init, 300, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_syncfs
++TRACE_SYSCALL_TABLE(sys_syncfs, sys_syncfs, 306, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setns
++TRACE_SYSCALL_TABLE(sys_setns, sys_setns, 308, 2)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.10.0-rc7_integers_override.h
+@@ -0,0 +1,3 @@
++/*
++ * this is a place-holder for x86_64 integer syscall definition override.
++ */
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.10.0-rc7_pointers.h
+@@ -0,0 +1,2304 @@
++/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */
++#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_POINTERS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_POINTERS_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include "x86-64-syscalls-3.10.0-rc7_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_64_sys_pipe
++SC_TRACE_EVENT(sys_pipe,
++ TP_PROTO(int * fildes),
++ TP_ARGS(fildes),
++ TP_STRUCT__entry(__field_hex(int *, fildes)),
++ TP_fast_assign(tp_assign(fildes, fildes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_newuname
++SC_TRACE_EVENT(sys_newuname,
++ TP_PROTO(struct new_utsname * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__field_hex(struct new_utsname *, name)),
++ TP_fast_assign(tp_assign(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_shmdt
++SC_TRACE_EVENT(sys_shmdt,
++ TP_PROTO(char * shmaddr),
++ TP_ARGS(shmaddr),
++ TP_STRUCT__entry(__field_hex(char *, shmaddr)),
++ TP_fast_assign(tp_assign(shmaddr, shmaddr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_chdir
++SC_TRACE_EVENT(sys_chdir,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rmdir
++SC_TRACE_EVENT(sys_rmdir,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_unlink
++SC_TRACE_EVENT(sys_unlink,
++ TP_PROTO(const char * pathname),
++ TP_ARGS(pathname),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sysinfo
++SC_TRACE_EVENT(sys_sysinfo,
++ TP_PROTO(struct sysinfo * info),
++ TP_ARGS(info),
++ TP_STRUCT__entry(__field_hex(struct sysinfo *, info)),
++ TP_fast_assign(tp_assign(info, info)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_times
++SC_TRACE_EVENT(sys_times,
++ TP_PROTO(struct tms * tbuf),
++ TP_ARGS(tbuf),
++ TP_STRUCT__entry(__field_hex(struct tms *, tbuf)),
++ TP_fast_assign(tp_assign(tbuf, tbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sysctl
++SC_TRACE_EVENT(sys_sysctl,
++ TP_PROTO(struct __sysctl_args * args),
++ TP_ARGS(args),
++ TP_STRUCT__entry(__field_hex(struct __sysctl_args *, args)),
++ TP_fast_assign(tp_assign(args, args)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_adjtimex
++SC_TRACE_EVENT(sys_adjtimex,
++ TP_PROTO(struct timex * txc_p),
++ TP_ARGS(txc_p),
++ TP_STRUCT__entry(__field_hex(struct timex *, txc_p)),
++ TP_fast_assign(tp_assign(txc_p, txc_p)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_chroot
++SC_TRACE_EVENT(sys_chroot,
++ TP_PROTO(const char * filename),
++ TP_ARGS(filename),
++ TP_STRUCT__entry(__string_from_user(filename, filename)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_acct
++SC_TRACE_EVENT(sys_acct,
++ TP_PROTO(const char * name),
++ TP_ARGS(name),
++ TP_STRUCT__entry(__string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_swapoff
++SC_TRACE_EVENT(sys_swapoff,
++ TP_PROTO(const char * specialfile),
++ TP_ARGS(specialfile),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_time
++SC_TRACE_EVENT(sys_time,
++ TP_PROTO(time_t * tloc),
++ TP_ARGS(tloc),
++ TP_STRUCT__entry(__field_hex(time_t *, tloc)),
++ TP_fast_assign(tp_assign(tloc, tloc)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_set_tid_address
++SC_TRACE_EVENT(sys_set_tid_address,
++ TP_PROTO(int * tidptr),
++ TP_ARGS(tidptr),
++ TP_STRUCT__entry(__field_hex(int *, tidptr)),
++ TP_fast_assign(tp_assign(tidptr, tidptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mq_unlink
++SC_TRACE_EVENT(sys_mq_unlink,
++ TP_PROTO(const char * u_name),
++ TP_ARGS(u_name),
++ TP_STRUCT__entry(__string_from_user(u_name, u_name)),
++ TP_fast_assign(tp_copy_string_from_user(u_name, u_name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_newstat
++SC_TRACE_EVENT(sys_newstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_newfstat
++SC_TRACE_EVENT(sys_newfstat,
++ TP_PROTO(unsigned int fd, struct stat * statbuf),
++ TP_ARGS(fd, statbuf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_newlstat
++SC_TRACE_EVENT(sys_newlstat,
++ TP_PROTO(const char * filename, struct stat * statbuf),
++ TP_ARGS(filename, statbuf),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct stat *, statbuf)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_access
++SC_TRACE_EVENT(sys_access,
++ TP_PROTO(const char * filename, int mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_nanosleep
++SC_TRACE_EVENT(sys_nanosleep,
++ TP_PROTO(struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(rqtp, rmtp),
++ TP_STRUCT__entry(__field_hex(struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getitimer
++SC_TRACE_EVENT(sys_getitimer,
++ TP_PROTO(int which, struct itimerval * value),
++ TP_ARGS(which, value),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_truncate
++SC_TRACE_EVENT(sys_truncate,
++ TP_PROTO(const char * path, long length),
++ TP_ARGS(path, length),
++ TP_STRUCT__entry(__string_from_user(path, path) __field(long, length)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(length, length)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getcwd
++SC_TRACE_EVENT(sys_getcwd,
++ TP_PROTO(char * buf, unsigned long size),
++ TP_ARGS(buf, size),
++ TP_STRUCT__entry(__field_hex(char *, buf) __field(unsigned long, size)),
++ TP_fast_assign(tp_assign(buf, buf) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rename
++SC_TRACE_EVENT(sys_rename,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mkdir
++SC_TRACE_EVENT(sys_mkdir,
++ TP_PROTO(const char * pathname, umode_t mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_creat
++SC_TRACE_EVENT(sys_creat,
++ TP_PROTO(const char * pathname, umode_t mode),
++ TP_ARGS(pathname, mode),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_link
++SC_TRACE_EVENT(sys_link,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_symlink
++SC_TRACE_EVENT(sys_symlink,
++ TP_PROTO(const char * oldname, const char * newname),
++ TP_ARGS(oldname, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_chmod
++SC_TRACE_EVENT(sys_chmod,
++ TP_PROTO(const char * filename, umode_t mode),
++ TP_ARGS(filename, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_gettimeofday
++SC_TRACE_EVENT(sys_gettimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getrlimit
++SC_TRACE_EVENT(sys_getrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getrusage
++SC_TRACE_EVENT(sys_getrusage,
++ TP_PROTO(int who, struct rusage * ru),
++ TP_ARGS(who, ru),
++ TP_STRUCT__entry(__field(int, who) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(who, who) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getgroups
++SC_TRACE_EVENT(sys_getgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setgroups
++SC_TRACE_EVENT(sys_setgroups,
++ TP_PROTO(int gidsetsize, gid_t * grouplist),
++ TP_ARGS(gidsetsize, grouplist),
++ TP_STRUCT__entry(__field(int, gidsetsize) __field_hex(gid_t *, grouplist)),
++ TP_fast_assign(tp_assign(gidsetsize, gidsetsize) tp_assign(grouplist, grouplist)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigpending
++SC_TRACE_EVENT(sys_rt_sigpending,
++ TP_PROTO(sigset_t * uset, size_t sigsetsize),
++ TP_ARGS(uset, sigsetsize),
++ TP_STRUCT__entry(__field_hex(sigset_t *, uset) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(uset, uset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigsuspend
++SC_TRACE_EVENT(sys_rt_sigsuspend,
++ TP_PROTO(sigset_t * unewset, size_t sigsetsize),
++ TP_ARGS(unewset, sigsetsize),
++ TP_STRUCT__entry(__field_hex(sigset_t *, unewset) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(unewset, unewset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sigaltstack
++SC_TRACE_EVENT(sys_sigaltstack,
++ TP_PROTO(const stack_t * uss, stack_t * uoss),
++ TP_ARGS(uss, uoss),
++ TP_STRUCT__entry(__field_hex(const stack_t *, uss) __field_hex(stack_t *, uoss)),
++ TP_fast_assign(tp_assign(uss, uss) tp_assign(uoss, uoss)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_utime
++SC_TRACE_EVENT(sys_utime,
++ TP_PROTO(char * filename, struct utimbuf * times),
++ TP_ARGS(filename, times),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct utimbuf *, times)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(times, times)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ustat
++SC_TRACE_EVENT(sys_ustat,
++ TP_PROTO(unsigned dev, struct ustat * ubuf),
++ TP_ARGS(dev, ubuf),
++ TP_STRUCT__entry(__field(unsigned, dev) __field_hex(struct ustat *, ubuf)),
++ TP_fast_assign(tp_assign(dev, dev) tp_assign(ubuf, ubuf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_statfs
++SC_TRACE_EVENT(sys_statfs,
++ TP_PROTO(const char * pathname, struct statfs * buf),
++ TP_ARGS(pathname, buf),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fstatfs
++SC_TRACE_EVENT(sys_fstatfs,
++ TP_PROTO(unsigned int fd, struct statfs * buf),
++ TP_ARGS(fd, buf),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct statfs *, buf)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_setparam
++SC_TRACE_EVENT(sys_sched_setparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_getparam
++SC_TRACE_EVENT(sys_sched_getparam,
++ TP_PROTO(pid_t pid, struct sched_param * param),
++ TP_ARGS(pid, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_rr_get_interval
++SC_TRACE_EVENT(sys_sched_rr_get_interval,
++ TP_PROTO(pid_t pid, struct timespec * interval),
++ TP_ARGS(pid, interval),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(struct timespec *, interval)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(interval, interval)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_pivot_root
++SC_TRACE_EVENT(sys_pivot_root,
++ TP_PROTO(const char * new_root, const char * put_old),
++ TP_ARGS(new_root, put_old),
++ TP_STRUCT__entry(__string_from_user(new_root, new_root) __string_from_user(put_old, put_old)),
++ TP_fast_assign(tp_copy_string_from_user(new_root, new_root) tp_copy_string_from_user(put_old, put_old)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setrlimit
++SC_TRACE_EVENT(sys_setrlimit,
++ TP_PROTO(unsigned int resource, struct rlimit * rlim),
++ TP_ARGS(resource, rlim),
++ TP_STRUCT__entry(__field(unsigned int, resource) __field_hex(struct rlimit *, rlim)),
++ TP_fast_assign(tp_assign(resource, resource) tp_assign(rlim, rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_settimeofday
++SC_TRACE_EVENT(sys_settimeofday,
++ TP_PROTO(struct timeval * tv, struct timezone * tz),
++ TP_ARGS(tv, tz),
++ TP_STRUCT__entry(__field_hex(struct timeval *, tv) __field_hex(struct timezone *, tz)),
++ TP_fast_assign(tp_assign(tv, tv) tp_assign(tz, tz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_umount
++SC_TRACE_EVENT(sys_umount,
++ TP_PROTO(char * name, int flags),
++ TP_ARGS(name, flags),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_swapon
++SC_TRACE_EVENT(sys_swapon,
++ TP_PROTO(const char * specialfile, int swap_flags),
++ TP_ARGS(specialfile, swap_flags),
++ TP_STRUCT__entry(__string_from_user(specialfile, specialfile) __field(int, swap_flags)),
++ TP_fast_assign(tp_copy_string_from_user(specialfile, specialfile) tp_assign(swap_flags, swap_flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sethostname
++SC_TRACE_EVENT(sys_sethostname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setdomainname
++SC_TRACE_EVENT(sys_setdomainname,
++ TP_PROTO(char * name, int len),
++ TP_ARGS(name, len),
++ TP_STRUCT__entry(__string_from_user(name, name) __field(int, len)),
++ TP_fast_assign(tp_copy_string_from_user(name, name) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_delete_module
++SC_TRACE_EVENT(sys_delete_module,
++ TP_PROTO(const char * name_user, unsigned int flags),
++ TP_ARGS(name_user, flags),
++ TP_STRUCT__entry(__string_from_user(name_user, name_user) __field(unsigned int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(name_user, name_user) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_removexattr
++SC_TRACE_EVENT(sys_removexattr,
++ TP_PROTO(const char * pathname, const char * name),
++ TP_ARGS(pathname, name),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lremovexattr
++SC_TRACE_EVENT(sys_lremovexattr,
++ TP_PROTO(const char * pathname, const char * name),
++ TP_ARGS(pathname, name),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fremovexattr
++SC_TRACE_EVENT(sys_fremovexattr,
++ TP_PROTO(int fd, const char * name),
++ TP_ARGS(fd, name),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_io_setup
++SC_TRACE_EVENT(sys_io_setup,
++ TP_PROTO(unsigned nr_events, aio_context_t * ctxp),
++ TP_ARGS(nr_events, ctxp),
++ TP_STRUCT__entry(__field(unsigned, nr_events) __field_hex(aio_context_t *, ctxp)),
++ TP_fast_assign(tp_assign(nr_events, nr_events) tp_assign(ctxp, ctxp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timer_gettime
++SC_TRACE_EVENT(sys_timer_gettime,
++ TP_PROTO(timer_t timer_id, struct itimerspec * setting),
++ TP_ARGS(timer_id, setting),
++ TP_STRUCT__entry(__field(timer_t, timer_id) __field_hex(struct itimerspec *, setting)),
++ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(setting, setting)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_clock_settime
++SC_TRACE_EVENT(sys_clock_settime,
++ TP_PROTO(const clockid_t which_clock, const struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(const struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_clock_gettime
++SC_TRACE_EVENT(sys_clock_gettime,
++ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_clock_getres
++SC_TRACE_EVENT(sys_clock_getres,
++ TP_PROTO(const clockid_t which_clock, struct timespec * tp),
++ TP_ARGS(which_clock, tp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timespec *, tp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(tp, tp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_utimes
++SC_TRACE_EVENT(sys_utimes,
++ TP_PROTO(char * filename, struct timeval * utimes),
++ TP_ARGS(filename, utimes),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mq_notify
++SC_TRACE_EVENT(sys_mq_notify,
++ TP_PROTO(mqd_t mqdes, const struct sigevent * u_notification),
++ TP_ARGS(mqdes, u_notification),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct sigevent *, u_notification)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_notification, u_notification)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_set_robust_list
++SC_TRACE_EVENT(sys_set_robust_list,
++ TP_PROTO(struct robust_list_head * head, size_t len),
++ TP_ARGS(head, len),
++ TP_STRUCT__entry(__field_hex(struct robust_list_head *, head) __field(size_t, len)),
++ TP_fast_assign(tp_assign(head, head) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timerfd_gettime
++SC_TRACE_EVENT(sys_timerfd_gettime,
++ TP_PROTO(int ufd, struct itimerspec * otmr),
++ TP_ARGS(ufd, otmr),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(struct itimerspec *, otmr)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(otmr, otmr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_pipe2
++SC_TRACE_EVENT(sys_pipe2,
++ TP_PROTO(int * fildes, int flags),
++ TP_ARGS(fildes, flags),
++ TP_STRUCT__entry(__field_hex(int *, fildes) __field(int, flags)),
++ TP_fast_assign(tp_assign(fildes, fildes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_clock_adjtime
++SC_TRACE_EVENT(sys_clock_adjtime,
++ TP_PROTO(const clockid_t which_clock, struct timex * utx),
++ TP_ARGS(which_clock, utx),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct timex *, utx)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(utx, utx)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_read
++SC_TRACE_EVENT(sys_read,
++ TP_PROTO(unsigned int fd, char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_write
++SC_TRACE_EVENT(sys_write,
++ TP_PROTO(unsigned int fd, const char * buf, size_t count),
++ TP_ARGS(fd, buf, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_open
++SC_TRACE_EVENT(sys_open,
++ TP_PROTO(const char * filename, int flags, umode_t mode),
++ TP_ARGS(filename, flags, mode),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(int, flags) __field(umode_t, mode)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_poll
++SC_TRACE_EVENT(sys_poll,
++ TP_PROTO(struct pollfd * ufds, unsigned int nfds, int timeout_msecs),
++ TP_ARGS(ufds, nfds, timeout_msecs),
++ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field(int, timeout_msecs)),
++ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(timeout_msecs, timeout_msecs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_readv
++SC_TRACE_EVENT(sys_readv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_writev
++SC_TRACE_EVENT(sys_writev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen),
++ TP_ARGS(fd, vec, vlen),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mincore
++SC_TRACE_EVENT(sys_mincore,
++ TP_PROTO(unsigned long start, size_t len, unsigned char * vec),
++ TP_ARGS(start, len, vec),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(size_t, len) __field_hex(unsigned char *, vec)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(vec, vec)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_shmat
++SC_TRACE_EVENT(sys_shmat,
++ TP_PROTO(int shmid, char * shmaddr, int shmflg),
++ TP_ARGS(shmid, shmaddr, shmflg),
++ TP_STRUCT__entry(__field(int, shmid) __field_hex(char *, shmaddr) __field(int, shmflg)),
++ TP_fast_assign(tp_assign(shmid, shmid) tp_assign(shmaddr, shmaddr) tp_assign(shmflg, shmflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_shmctl
++SC_TRACE_EVENT(sys_shmctl,
++ TP_PROTO(int shmid, int cmd, struct shmid_ds * buf),
++ TP_ARGS(shmid, cmd, buf),
++ TP_STRUCT__entry(__field(int, shmid) __field(int, cmd) __field_hex(struct shmid_ds *, buf)),
++ TP_fast_assign(tp_assign(shmid, shmid) tp_assign(cmd, cmd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setitimer
++SC_TRACE_EVENT(sys_setitimer,
++ TP_PROTO(int which, struct itimerval * value, struct itimerval * ovalue),
++ TP_ARGS(which, value, ovalue),
++ TP_STRUCT__entry(__field(int, which) __field_hex(struct itimerval *, value) __field_hex(struct itimerval *, ovalue)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(value, value) tp_assign(ovalue, ovalue)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_connect
++SC_TRACE_EVENT(sys_connect,
++ TP_PROTO(int fd, struct sockaddr * uservaddr, int addrlen),
++ TP_ARGS(fd, uservaddr, addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, uservaddr) __field_hex(int, addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(uservaddr, uservaddr) tp_assign(addrlen, addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_accept
++SC_TRACE_EVENT(sys_accept,
++ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen),
++ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sendmsg
++SC_TRACE_EVENT(sys_sendmsg,
++ TP_PROTO(int fd, struct msghdr * msg, unsigned int flags),
++ TP_ARGS(fd, msg, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_recvmsg
++SC_TRACE_EVENT(sys_recvmsg,
++ TP_PROTO(int fd, struct msghdr * msg, unsigned int flags),
++ TP_ARGS(fd, msg, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct msghdr *, msg) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(msg, msg) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_bind
++SC_TRACE_EVENT(sys_bind,
++ TP_PROTO(int fd, struct sockaddr * umyaddr, int addrlen),
++ TP_ARGS(fd, umyaddr, addrlen),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, umyaddr) __field_hex(int, addrlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(umyaddr, umyaddr) tp_assign(addrlen, addrlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getsockname
++SC_TRACE_EVENT(sys_getsockname,
++ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
++ TP_ARGS(fd, usockaddr, usockaddr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getpeername
++SC_TRACE_EVENT(sys_getpeername,
++ TP_PROTO(int fd, struct sockaddr * usockaddr, int * usockaddr_len),
++ TP_ARGS(fd, usockaddr, usockaddr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, usockaddr) __field_hex(int *, usockaddr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(usockaddr, usockaddr) tp_assign(usockaddr_len, usockaddr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_semop
++SC_TRACE_EVENT(sys_semop,
++ TP_PROTO(int semid, struct sembuf * tsops, unsigned nsops),
++ TP_ARGS(semid, tsops, nsops),
++ TP_STRUCT__entry(__field(int, semid) __field_hex(struct sembuf *, tsops) __field(unsigned, nsops)),
++ TP_fast_assign(tp_assign(semid, semid) tp_assign(tsops, tsops) tp_assign(nsops, nsops)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_msgctl
++SC_TRACE_EVENT(sys_msgctl,
++ TP_PROTO(int msqid, int cmd, struct msqid_ds * buf),
++ TP_ARGS(msqid, cmd, buf),
++ TP_STRUCT__entry(__field(int, msqid) __field(int, cmd) __field_hex(struct msqid_ds *, buf)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(cmd, cmd) tp_assign(buf, buf)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getdents
++SC_TRACE_EVENT(sys_getdents,
++ TP_PROTO(unsigned int fd, struct linux_dirent * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_readlink
++SC_TRACE_EVENT(sys_readlink,
++ TP_PROTO(const char * path, char * buf, int bufsiz),
++ TP_ARGS(path, buf, bufsiz),
++ TP_STRUCT__entry(__string_from_user(path, path) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_copy_string_from_user(path, path) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_chown
++SC_TRACE_EVENT(sys_chown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lchown
++SC_TRACE_EVENT(sys_lchown,
++ TP_PROTO(const char * filename, uid_t user, gid_t group),
++ TP_ARGS(filename, user, group),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_syslog
++SC_TRACE_EVENT(sys_syslog,
++ TP_PROTO(int type, char * buf, int len),
++ TP_ARGS(type, buf, len),
++ TP_STRUCT__entry(__field(int, type) __field_hex(char *, buf) __field(int, len)),
++ TP_fast_assign(tp_assign(type, type) tp_assign(buf, buf) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getresuid
++SC_TRACE_EVENT(sys_getresuid,
++ TP_PROTO(uid_t * ruidp, uid_t * euidp, uid_t * suidp),
++ TP_ARGS(ruidp, euidp, suidp),
++ TP_STRUCT__entry(__field_hex(uid_t *, ruidp) __field_hex(uid_t *, euidp) __field_hex(uid_t *, suidp)),
++ TP_fast_assign(tp_assign(ruidp, ruidp) tp_assign(euidp, euidp) tp_assign(suidp, suidp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getresgid
++SC_TRACE_EVENT(sys_getresgid,
++ TP_PROTO(gid_t * rgidp, gid_t * egidp, gid_t * sgidp),
++ TP_ARGS(rgidp, egidp, sgidp),
++ TP_STRUCT__entry(__field_hex(gid_t *, rgidp) __field_hex(gid_t *, egidp) __field_hex(gid_t *, sgidp)),
++ TP_fast_assign(tp_assign(rgidp, rgidp) tp_assign(egidp, egidp) tp_assign(sgidp, sgidp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigqueueinfo
++SC_TRACE_EVENT(sys_rt_sigqueueinfo,
++ TP_PROTO(pid_t pid, int sig, siginfo_t * uinfo),
++ TP_ARGS(pid, sig, uinfo),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mknod
++SC_TRACE_EVENT(sys_mknod,
++ TP_PROTO(const char * filename, umode_t mode, unsigned dev),
++ TP_ARGS(filename, mode, dev),
++ TP_STRUCT__entry(__string_from_user(filename, filename) __field(umode_t, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_setscheduler
++SC_TRACE_EVENT(sys_sched_setscheduler,
++ TP_PROTO(pid_t pid, int policy, struct sched_param * param),
++ TP_ARGS(pid, policy, param),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(int, policy) __field_hex(struct sched_param *, param)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(policy, policy) tp_assign(param, param)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_init_module
++SC_TRACE_EVENT(sys_init_module,
++ TP_PROTO(void * umod, unsigned long len, const char * uargs),
++ TP_ARGS(umod, len, uargs),
++ TP_STRUCT__entry(__field_hex(void *, umod) __field(unsigned long, len) __field_hex(const char *, uargs)),
++ TP_fast_assign(tp_assign(umod, umod) tp_assign(len, len) tp_assign(uargs, uargs)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_listxattr
++SC_TRACE_EVENT(sys_listxattr,
++ TP_PROTO(const char * pathname, char * list, size_t size),
++ TP_ARGS(pathname, list, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_llistxattr
++SC_TRACE_EVENT(sys_llistxattr,
++ TP_PROTO(const char * pathname, char * list, size_t size),
++ TP_ARGS(pathname, list, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_flistxattr
++SC_TRACE_EVENT(sys_flistxattr,
++ TP_PROTO(int fd, char * list, size_t size),
++ TP_ARGS(fd, list, size),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(char *, list) __field(size_t, size)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(list, list) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_setaffinity
++SC_TRACE_EVENT(sys_sched_setaffinity,
++ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
++ TP_ARGS(pid, len, user_mask_ptr),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sched_getaffinity
++SC_TRACE_EVENT(sys_sched_getaffinity,
++ TP_PROTO(pid_t pid, unsigned int len, unsigned long * user_mask_ptr),
++ TP_ARGS(pid, len, user_mask_ptr),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, len) __field_hex(unsigned long *, user_mask_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(len, len) tp_assign(user_mask_ptr, user_mask_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_io_submit
++SC_TRACE_EVENT(sys_io_submit,
++ TP_PROTO(aio_context_t ctx_id, long nr, struct iocb * * iocbpp),
++ TP_ARGS(ctx_id, nr, iocbpp),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, nr) __field_hex(struct iocb * *, iocbpp)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(nr, nr) tp_assign(iocbpp, iocbpp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_io_cancel
++SC_TRACE_EVENT(sys_io_cancel,
++ TP_PROTO(aio_context_t ctx_id, struct iocb * iocb, struct io_event * result),
++ TP_ARGS(ctx_id, iocb, result),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field_hex(struct iocb *, iocb) __field_hex(struct io_event *, result)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(iocb, iocb) tp_assign(result, result)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lookup_dcookie
++SC_TRACE_EVENT(sys_lookup_dcookie,
++ TP_PROTO(u64 cookie64, char * buf, size_t len),
++ TP_ARGS(cookie64, buf, len),
++ TP_STRUCT__entry(__field(u64, cookie64) __field_hex(char *, buf) __field(size_t, len)),
++ TP_fast_assign(tp_assign(cookie64, cookie64) tp_assign(buf, buf) tp_assign(len, len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getdents64
++SC_TRACE_EVENT(sys_getdents64,
++ TP_PROTO(unsigned int fd, struct linux_dirent64 * dirent, unsigned int count),
++ TP_ARGS(fd, dirent, count),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(struct linux_dirent64 *, dirent) __field(unsigned int, count)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(dirent, dirent) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timer_create
++SC_TRACE_EVENT(sys_timer_create,
++ TP_PROTO(const clockid_t which_clock, struct sigevent * timer_event_spec, timer_t * created_timer_id),
++ TP_ARGS(which_clock, timer_event_spec, created_timer_id),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field_hex(struct sigevent *, timer_event_spec) __field_hex(timer_t *, created_timer_id)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(timer_event_spec, timer_event_spec) tp_assign(created_timer_id, created_timer_id)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_set_mempolicy
++SC_TRACE_EVENT(sys_set_mempolicy,
++ TP_PROTO(int mode, unsigned long * nmask, unsigned long maxnode),
++ TP_ARGS(mode, nmask, maxnode),
++ TP_STRUCT__entry(__field(int, mode) __field_hex(unsigned long *, nmask) __field(unsigned long, maxnode)),
++ TP_fast_assign(tp_assign(mode, mode) tp_assign(nmask, nmask) tp_assign(maxnode, maxnode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mq_getsetattr
++SC_TRACE_EVENT(sys_mq_getsetattr,
++ TP_PROTO(mqd_t mqdes, const struct mq_attr * u_mqstat, struct mq_attr * u_omqstat),
++ TP_ARGS(mqdes, u_mqstat, u_omqstat),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const struct mq_attr *, u_mqstat) __field_hex(struct mq_attr *, u_omqstat)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_mqstat, u_mqstat) tp_assign(u_omqstat, u_omqstat)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_inotify_add_watch
++SC_TRACE_EVENT(sys_inotify_add_watch,
++ TP_PROTO(int fd, const char * pathname, u32 mask),
++ TP_ARGS(fd, pathname, mask),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(pathname, pathname) __field(u32, mask)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(pathname, pathname) tp_assign(mask, mask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mkdirat
++SC_TRACE_EVENT(sys_mkdirat,
++ TP_PROTO(int dfd, const char * pathname, umode_t mode),
++ TP_ARGS(dfd, pathname, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_futimesat
++SC_TRACE_EVENT(sys_futimesat,
++ TP_PROTO(int dfd, const char * filename, struct timeval * utimes),
++ TP_ARGS(dfd, filename, utimes),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timeval *, utimes)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_unlinkat
++SC_TRACE_EVENT(sys_unlinkat,
++ TP_PROTO(int dfd, const char * pathname, int flag),
++ TP_ARGS(dfd, pathname, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_symlinkat
++SC_TRACE_EVENT(sys_symlinkat,
++ TP_PROTO(const char * oldname, int newdfd, const char * newname),
++ TP_ARGS(oldname, newdfd, newname),
++ TP_STRUCT__entry(__string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fchmodat
++SC_TRACE_EVENT(sys_fchmodat,
++ TP_PROTO(int dfd, const char * filename, umode_t mode),
++ TP_ARGS(dfd, filename, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_faccessat
++SC_TRACE_EVENT(sys_faccessat,
++ TP_PROTO(int dfd, const char * filename, int mode),
++ TP_ARGS(dfd, filename, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_get_robust_list
++SC_TRACE_EVENT(sys_get_robust_list,
++ TP_PROTO(int pid, struct robust_list_head * * head_ptr, size_t * len_ptr),
++ TP_ARGS(pid, head_ptr, len_ptr),
++ TP_STRUCT__entry(__field(int, pid) __field_hex(struct robust_list_head * *, head_ptr) __field_hex(size_t *, len_ptr)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(head_ptr, head_ptr) tp_assign(len_ptr, len_ptr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_signalfd
++SC_TRACE_EVENT(sys_signalfd,
++ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask),
++ TP_ARGS(ufd, user_mask, sizemask),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_open_by_handle_at
++SC_TRACE_EVENT(sys_open_by_handle_at,
++ TP_PROTO(int mountdirfd, struct file_handle * handle, int flags),
++ TP_ARGS(mountdirfd, handle, flags),
++ TP_STRUCT__entry(__field(int, mountdirfd) __field_hex(struct file_handle *, handle) __field(int, flags)),
++ TP_fast_assign(tp_assign(mountdirfd, mountdirfd) tp_assign(handle, handle) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getcpu
++SC_TRACE_EVENT(sys_getcpu,
++ TP_PROTO(unsigned * cpup, unsigned * nodep, struct getcpu_cache * unused),
++ TP_ARGS(cpup, nodep, unused),
++ TP_STRUCT__entry(__field_hex(unsigned *, cpup) __field_hex(unsigned *, nodep) __field_hex(struct getcpu_cache *, unused)),
++ TP_fast_assign(tp_assign(cpup, cpup) tp_assign(nodep, nodep) tp_assign(unused, unused)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_finit_module
++SC_TRACE_EVENT(sys_finit_module,
++ TP_PROTO(int fd, const char * uargs, int flags),
++ TP_ARGS(fd, uargs, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(const char *, uargs) __field(int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(uargs, uargs) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigaction
++SC_TRACE_EVENT(sys_rt_sigaction,
++ TP_PROTO(int sig, const struct sigaction * act, struct sigaction * oact, size_t sigsetsize),
++ TP_ARGS(sig, act, oact, sigsetsize),
++ TP_STRUCT__entry(__field(int, sig) __field_hex(const struct sigaction *, act) __field_hex(struct sigaction *, oact) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(sig, sig) tp_assign(act, act) tp_assign(oact, oact) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigprocmask
++SC_TRACE_EVENT(sys_rt_sigprocmask,
++ TP_PROTO(int how, sigset_t * nset, sigset_t * oset, size_t sigsetsize),
++ TP_ARGS(how, nset, oset, sigsetsize),
++ TP_STRUCT__entry(__field(int, how) __field_hex(sigset_t *, nset) __field_hex(sigset_t *, oset) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(how, how) tp_assign(nset, nset) tp_assign(oset, oset) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_pread64
++SC_TRACE_EVENT(sys_pread64,
++ TP_PROTO(unsigned int fd, char * buf, size_t count, loff_t pos),
++ TP_ARGS(fd, buf, count, pos),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(char *, buf) __field(size_t, count) __field(loff_t, pos)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count) tp_assign(pos, pos)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_pwrite64
++SC_TRACE_EVENT(sys_pwrite64,
++ TP_PROTO(unsigned int fd, const char * buf, size_t count, loff_t pos),
++ TP_ARGS(fd, buf, count, pos),
++ TP_STRUCT__entry(__field(unsigned int, fd) __field_hex(const char *, buf) __field(size_t, count) __field(loff_t, pos)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buf, buf) tp_assign(count, count) tp_assign(pos, pos)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sendfile64
++SC_TRACE_EVENT(sys_sendfile64,
++ TP_PROTO(int out_fd, int in_fd, loff_t * offset, size_t count),
++ TP_ARGS(out_fd, in_fd, offset, count),
++ TP_STRUCT__entry(__field(int, out_fd) __field(int, in_fd) __field_hex(loff_t *, offset) __field(size_t, count)),
++ TP_fast_assign(tp_assign(out_fd, out_fd) tp_assign(in_fd, in_fd) tp_assign(offset, offset) tp_assign(count, count)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_socketpair
++SC_TRACE_EVENT(sys_socketpair,
++ TP_PROTO(int family, int type, int protocol, int * usockvec),
++ TP_ARGS(family, type, protocol, usockvec),
++ TP_STRUCT__entry(__field(int, family) __field(int, type) __field(int, protocol) __field_hex(int *, usockvec)),
++ TP_fast_assign(tp_assign(family, family) tp_assign(type, type) tp_assign(protocol, protocol) tp_assign(usockvec, usockvec)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_wait4
++SC_TRACE_EVENT(sys_wait4,
++ TP_PROTO(pid_t upid, int * stat_addr, int options, struct rusage * ru),
++ TP_ARGS(upid, stat_addr, options, ru),
++ TP_STRUCT__entry(__field(pid_t, upid) __field_hex(int *, stat_addr) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(upid, upid) tp_assign(stat_addr, stat_addr) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_msgsnd
++SC_TRACE_EVENT(sys_msgsnd,
++ TP_PROTO(int msqid, struct msgbuf * msgp, size_t msgsz, int msgflg),
++ TP_ARGS(msqid, msgp, msgsz, msgflg),
++ TP_STRUCT__entry(__field(int, msqid) __field_hex(struct msgbuf *, msgp) __field(size_t, msgsz) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_sigtimedwait
++SC_TRACE_EVENT(sys_rt_sigtimedwait,
++ TP_PROTO(const sigset_t * uthese, siginfo_t * uinfo, const struct timespec * uts, size_t sigsetsize),
++ TP_ARGS(uthese, uinfo, uts, sigsetsize),
++ TP_STRUCT__entry(__field_hex(const sigset_t *, uthese) __field_hex(siginfo_t *, uinfo) __field_hex(const struct timespec *, uts) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(uthese, uthese) tp_assign(uinfo, uinfo) tp_assign(uts, uts) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_reboot
++SC_TRACE_EVENT(sys_reboot,
++ TP_PROTO(int magic1, int magic2, unsigned int cmd, void * arg),
++ TP_ARGS(magic1, magic2, cmd, arg),
++ TP_STRUCT__entry(__field(int, magic1) __field(int, magic2) __field(unsigned int, cmd) __field_hex(void *, arg)),
++ TP_fast_assign(tp_assign(magic1, magic1) tp_assign(magic2, magic2) tp_assign(cmd, cmd) tp_assign(arg, arg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_quotactl
++SC_TRACE_EVENT(sys_quotactl,
++ TP_PROTO(unsigned int cmd, const char * special, qid_t id, void * addr),
++ TP_ARGS(cmd, special, id, addr),
++ TP_STRUCT__entry(__field(unsigned int, cmd) __field_hex(const char *, special) __field(qid_t, id) __field_hex(void *, addr)),
++ TP_fast_assign(tp_assign(cmd, cmd) tp_assign(special, special) tp_assign(id, id) tp_assign(addr, addr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getxattr
++SC_TRACE_EVENT(sys_getxattr,
++ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
++ TP_ARGS(pathname, name, value, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lgetxattr
++SC_TRACE_EVENT(sys_lgetxattr,
++ TP_PROTO(const char * pathname, const char * name, void * value, size_t size),
++ TP_ARGS(pathname, name, value, size),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fgetxattr
++SC_TRACE_EVENT(sys_fgetxattr,
++ TP_PROTO(int fd, const char * name, void * value, size_t size),
++ TP_ARGS(fd, name, value, size),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(void *, value) __field(size_t, size)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_semtimedop
++SC_TRACE_EVENT(sys_semtimedop,
++ TP_PROTO(int semid, struct sembuf * tsops, unsigned nsops, const struct timespec * timeout),
++ TP_ARGS(semid, tsops, nsops, timeout),
++ TP_STRUCT__entry(__field(int, semid) __field_hex(struct sembuf *, tsops) __field(unsigned, nsops) __field_hex(const struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(semid, semid) tp_assign(tsops, tsops) tp_assign(nsops, nsops) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timer_settime
++SC_TRACE_EVENT(sys_timer_settime,
++ TP_PROTO(timer_t timer_id, int flags, const struct itimerspec * new_setting, struct itimerspec * old_setting),
++ TP_ARGS(timer_id, flags, new_setting, old_setting),
++ TP_STRUCT__entry(__field(timer_t, timer_id) __field(int, flags) __field_hex(const struct itimerspec *, new_setting) __field_hex(struct itimerspec *, old_setting)),
++ TP_fast_assign(tp_assign(timer_id, timer_id) tp_assign(flags, flags) tp_assign(new_setting, new_setting) tp_assign(old_setting, old_setting)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_clock_nanosleep
++SC_TRACE_EVENT(sys_clock_nanosleep,
++ TP_PROTO(const clockid_t which_clock, int flags, const struct timespec * rqtp, struct timespec * rmtp),
++ TP_ARGS(which_clock, flags, rqtp, rmtp),
++ TP_STRUCT__entry(__field(const clockid_t, which_clock) __field(int, flags) __field_hex(const struct timespec *, rqtp) __field_hex(struct timespec *, rmtp)),
++ TP_fast_assign(tp_assign(which_clock, which_clock) tp_assign(flags, flags) tp_assign(rqtp, rqtp) tp_assign(rmtp, rmtp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_epoll_wait
++SC_TRACE_EVENT(sys_epoll_wait,
++ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout),
++ TP_ARGS(epfd, events, maxevents, timeout),
++ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_epoll_ctl
++SC_TRACE_EVENT(sys_epoll_ctl,
++ TP_PROTO(int epfd, int op, int fd, struct epoll_event * event),
++ TP_ARGS(epfd, op, fd, event),
++ TP_STRUCT__entry(__field(int, epfd) __field(int, op) __field(int, fd) __field_hex(struct epoll_event *, event)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(op, op) tp_assign(fd, fd) tp_assign(event, event)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mq_open
++SC_TRACE_EVENT(sys_mq_open,
++ TP_PROTO(const char * u_name, int oflag, umode_t mode, struct mq_attr * u_attr),
++ TP_ARGS(u_name, oflag, mode, u_attr),
++ TP_STRUCT__entry(__string_from_user(u_name, u_name) __field(int, oflag) __field(umode_t, mode) __field_hex(struct mq_attr *, u_attr)),
++ TP_fast_assign(tp_copy_string_from_user(u_name, u_name) tp_assign(oflag, oflag) tp_assign(mode, mode) tp_assign(u_attr, u_attr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_kexec_load
++SC_TRACE_EVENT(sys_kexec_load,
++ TP_PROTO(unsigned long entry, unsigned long nr_segments, struct kexec_segment * segments, unsigned long flags),
++ TP_ARGS(entry, nr_segments, segments, flags),
++ TP_STRUCT__entry(__field(unsigned long, entry) __field(unsigned long, nr_segments) __field_hex(struct kexec_segment *, segments) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(entry, entry) tp_assign(nr_segments, nr_segments) tp_assign(segments, segments) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_request_key
++SC_TRACE_EVENT(sys_request_key,
++ TP_PROTO(const char * _type, const char * _description, const char * _callout_info, key_serial_t destringid),
++ TP_ARGS(_type, _description, _callout_info, destringid),
++ TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const char *, _callout_info) __field(key_serial_t, destringid)),
++ TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_callout_info, _callout_info) tp_assign(destringid, destringid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_migrate_pages
++SC_TRACE_EVENT(sys_migrate_pages,
++ TP_PROTO(pid_t pid, unsigned long maxnode, const unsigned long * old_nodes, const unsigned long * new_nodes),
++ TP_ARGS(pid, maxnode, old_nodes, new_nodes),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned long, maxnode) __field_hex(const unsigned long *, old_nodes) __field_hex(const unsigned long *, new_nodes)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(maxnode, maxnode) tp_assign(old_nodes, old_nodes) tp_assign(new_nodes, new_nodes)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_openat
++SC_TRACE_EVENT(sys_openat,
++ TP_PROTO(int dfd, const char * filename, int flags, umode_t mode),
++ TP_ARGS(dfd, filename, flags, mode),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(int, flags) __field(umode_t, mode)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(flags, flags) tp_assign(mode, mode)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mknodat
++SC_TRACE_EVENT(sys_mknodat,
++ TP_PROTO(int dfd, const char * filename, umode_t mode, unsigned dev),
++ TP_ARGS(dfd, filename, mode, dev),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(umode_t, mode) __field(unsigned, dev)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(mode, mode) tp_assign(dev, dev)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_newfstatat
++SC_TRACE_EVENT(sys_newfstatat,
++ TP_PROTO(int dfd, const char * filename, struct stat * statbuf, int flag),
++ TP_ARGS(dfd, filename, statbuf, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct stat *, statbuf) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(statbuf, statbuf) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_renameat
++SC_TRACE_EVENT(sys_renameat,
++ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname),
++ TP_ARGS(olddfd, oldname, newdfd, newname),
++ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname)),
++ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_readlinkat
++SC_TRACE_EVENT(sys_readlinkat,
++ TP_PROTO(int dfd, const char * pathname, char * buf, int bufsiz),
++ TP_ARGS(dfd, pathname, buf, bufsiz),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field_hex(char *, buf) __field(int, bufsiz)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(buf, buf) tp_assign(bufsiz, bufsiz)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_vmsplice
++SC_TRACE_EVENT(sys_vmsplice,
++ TP_PROTO(int fd, const struct iovec * iov, unsigned long nr_segs, unsigned int flags),
++ TP_ARGS(fd, iov, nr_segs, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(const struct iovec *, iov) __field(unsigned long, nr_segs) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(iov, iov) tp_assign(nr_segs, nr_segs) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_utimensat
++SC_TRACE_EVENT(sys_utimensat,
++ TP_PROTO(int dfd, const char * filename, struct timespec * utimes, int flags),
++ TP_ARGS(dfd, filename, utimes, flags),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field_hex(struct timespec *, utimes) __field(int, flags)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(utimes, utimes) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_timerfd_settime
++SC_TRACE_EVENT(sys_timerfd_settime,
++ TP_PROTO(int ufd, int flags, const struct itimerspec * utmr, struct itimerspec * otmr),
++ TP_ARGS(ufd, flags, utmr, otmr),
++ TP_STRUCT__entry(__field(int, ufd) __field(int, flags) __field_hex(const struct itimerspec *, utmr) __field_hex(struct itimerspec *, otmr)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(flags, flags) tp_assign(utmr, utmr) tp_assign(otmr, otmr)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_accept4
++SC_TRACE_EVENT(sys_accept4,
++ TP_PROTO(int fd, struct sockaddr * upeer_sockaddr, int * upeer_addrlen, int flags),
++ TP_ARGS(fd, upeer_sockaddr, upeer_addrlen, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct sockaddr *, upeer_sockaddr) __field_hex(int *, upeer_addrlen) __field(int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(upeer_sockaddr, upeer_sockaddr) tp_assign(upeer_addrlen, upeer_addrlen) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_signalfd4
++SC_TRACE_EVENT(sys_signalfd4,
++ TP_PROTO(int ufd, sigset_t * user_mask, size_t sizemask, int flags),
++ TP_ARGS(ufd, user_mask, sizemask, flags),
++ TP_STRUCT__entry(__field(int, ufd) __field_hex(sigset_t *, user_mask) __field(size_t, sizemask) __field(int, flags)),
++ TP_fast_assign(tp_assign(ufd, ufd) tp_assign(user_mask, user_mask) tp_assign(sizemask, sizemask) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_rt_tgsigqueueinfo
++SC_TRACE_EVENT(sys_rt_tgsigqueueinfo,
++ TP_PROTO(pid_t tgid, pid_t pid, int sig, siginfo_t * uinfo),
++ TP_ARGS(tgid, pid, sig, uinfo),
++ TP_STRUCT__entry(__field(pid_t, tgid) __field(pid_t, pid) __field(int, sig) __field_hex(siginfo_t *, uinfo)),
++ TP_fast_assign(tp_assign(tgid, tgid) tp_assign(pid, pid) tp_assign(sig, sig) tp_assign(uinfo, uinfo)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_prlimit64
++SC_TRACE_EVENT(sys_prlimit64,
++ TP_PROTO(pid_t pid, unsigned int resource, const struct rlimit64 * new_rlim, struct rlimit64 * old_rlim),
++ TP_ARGS(pid, resource, new_rlim, old_rlim),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned int, resource) __field_hex(const struct rlimit64 *, new_rlim) __field_hex(struct rlimit64 *, old_rlim)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(resource, resource) tp_assign(new_rlim, new_rlim) tp_assign(old_rlim, old_rlim)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sendmmsg
++SC_TRACE_EVENT(sys_sendmmsg,
++ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags),
++ TP_ARGS(fd, mmsg, vlen, flags),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_select
++SC_TRACE_EVENT(sys_select,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timeval * tvp),
++ TP_ARGS(n, inp, outp, exp, tvp),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timeval *, tvp)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tvp, tvp)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setsockopt
++SC_TRACE_EVENT(sys_setsockopt,
++ TP_PROTO(int fd, int level, int optname, char * optval, int optlen),
++ TP_ARGS(fd, level, optname, optval, optlen),
++ TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field(int, optlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_getsockopt
++SC_TRACE_EVENT(sys_getsockopt,
++ TP_PROTO(int fd, int level, int optname, char * optval, int * optlen),
++ TP_ARGS(fd, level, optname, optval, optlen),
++ TP_STRUCT__entry(__field(int, fd) __field(int, level) __field(int, optname) __field_hex(char *, optval) __field_hex(int *, optlen)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(level, level) tp_assign(optname, optname) tp_assign(optval, optval) tp_assign(optlen, optlen)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_msgrcv
++SC_TRACE_EVENT(sys_msgrcv,
++ TP_PROTO(int msqid, struct msgbuf * msgp, size_t msgsz, long msgtyp, int msgflg),
++ TP_ARGS(msqid, msgp, msgsz, msgtyp, msgflg),
++ TP_STRUCT__entry(__field(int, msqid) __field_hex(struct msgbuf *, msgp) __field(size_t, msgsz) __field(long, msgtyp) __field(int, msgflg)),
++ TP_fast_assign(tp_assign(msqid, msqid) tp_assign(msgp, msgp) tp_assign(msgsz, msgsz) tp_assign(msgtyp, msgtyp) tp_assign(msgflg, msgflg)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mount
++SC_TRACE_EVENT(sys_mount,
++ TP_PROTO(char * dev_name, char * dir_name, char * type, unsigned long flags, void * data),
++ TP_ARGS(dev_name, dir_name, type, flags, data),
++ TP_STRUCT__entry(__string_from_user(dev_name, dev_name) __string_from_user(dir_name, dir_name) __string_from_user(type, type) __field(unsigned long, flags) __field_hex(void *, data)),
++ TP_fast_assign(tp_copy_string_from_user(dev_name, dev_name) tp_copy_string_from_user(dir_name, dir_name) tp_copy_string_from_user(type, type) tp_assign(flags, flags) tp_assign(data, data)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_setxattr
++SC_TRACE_EVENT(sys_setxattr,
++ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(pathname, name, value, size, flags),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_lsetxattr
++SC_TRACE_EVENT(sys_lsetxattr,
++ TP_PROTO(const char * pathname, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(pathname, name, value, size, flags),
++ TP_STRUCT__entry(__string_from_user(pathname, pathname) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_copy_string_from_user(pathname, pathname) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fsetxattr
++SC_TRACE_EVENT(sys_fsetxattr,
++ TP_PROTO(int fd, const char * name, const void * value, size_t size, int flags),
++ TP_ARGS(fd, name, value, size, flags),
++ TP_STRUCT__entry(__field(int, fd) __string_from_user(name, name) __field_hex(const void *, value) __field(size_t, size) __field(int, flags)),
++ TP_fast_assign(tp_assign(fd, fd) tp_copy_string_from_user(name, name) tp_assign(value, value) tp_assign(size, size) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_io_getevents
++SC_TRACE_EVENT(sys_io_getevents,
++ TP_PROTO(aio_context_t ctx_id, long min_nr, long nr, struct io_event * events, struct timespec * timeout),
++ TP_ARGS(ctx_id, min_nr, nr, events, timeout),
++ TP_STRUCT__entry(__field(aio_context_t, ctx_id) __field(long, min_nr) __field(long, nr) __field_hex(struct io_event *, events) __field_hex(struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(ctx_id, ctx_id) tp_assign(min_nr, min_nr) tp_assign(nr, nr) tp_assign(events, events) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_get_mempolicy
++SC_TRACE_EVENT(sys_get_mempolicy,
++ TP_PROTO(int * policy, unsigned long * nmask, unsigned long maxnode, unsigned long addr, unsigned long flags),
++ TP_ARGS(policy, nmask, maxnode, addr, flags),
++ TP_STRUCT__entry(__field_hex(int *, policy) __field_hex(unsigned long *, nmask) __field(unsigned long, maxnode) __field_hex(unsigned long, addr) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(policy, policy) tp_assign(nmask, nmask) tp_assign(maxnode, maxnode) tp_assign(addr, addr) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mq_timedsend
++SC_TRACE_EVENT(sys_mq_timedsend,
++ TP_PROTO(mqd_t mqdes, const char * u_msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec * u_abs_timeout),
++ TP_ARGS(mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(const char *, u_msg_ptr) __field(size_t, msg_len) __field(unsigned int, msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(msg_prio, msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mq_timedreceive
++SC_TRACE_EVENT(sys_mq_timedreceive,
++ TP_PROTO(mqd_t mqdes, char * u_msg_ptr, size_t msg_len, unsigned int * u_msg_prio, const struct timespec * u_abs_timeout),
++ TP_ARGS(mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout),
++ TP_STRUCT__entry(__field(mqd_t, mqdes) __field_hex(char *, u_msg_ptr) __field(size_t, msg_len) __field_hex(unsigned int *, u_msg_prio) __field_hex(const struct timespec *, u_abs_timeout)),
++ TP_fast_assign(tp_assign(mqdes, mqdes) tp_assign(u_msg_ptr, u_msg_ptr) tp_assign(msg_len, msg_len) tp_assign(u_msg_prio, u_msg_prio) tp_assign(u_abs_timeout, u_abs_timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_waitid
++SC_TRACE_EVENT(sys_waitid,
++ TP_PROTO(int which, pid_t upid, struct siginfo * infop, int options, struct rusage * ru),
++ TP_ARGS(which, upid, infop, options, ru),
++ TP_STRUCT__entry(__field(int, which) __field(pid_t, upid) __field_hex(struct siginfo *, infop) __field(int, options) __field_hex(struct rusage *, ru)),
++ TP_fast_assign(tp_assign(which, which) tp_assign(upid, upid) tp_assign(infop, infop) tp_assign(options, options) tp_assign(ru, ru)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_add_key
++SC_TRACE_EVENT(sys_add_key,
++ TP_PROTO(const char * _type, const char * _description, const void * _payload, size_t plen, key_serial_t ringid),
++ TP_ARGS(_type, _description, _payload, plen, ringid),
++ TP_STRUCT__entry(__string_from_user(_type, _type) __field_hex(const char *, _description) __field_hex(const void *, _payload) __field(size_t, plen) __field(key_serial_t, ringid)),
++ TP_fast_assign(tp_copy_string_from_user(_type, _type) tp_assign(_description, _description) tp_assign(_payload, _payload) tp_assign(plen, plen) tp_assign(ringid, ringid)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fchownat
++SC_TRACE_EVENT(sys_fchownat,
++ TP_PROTO(int dfd, const char * filename, uid_t user, gid_t group, int flag),
++ TP_ARGS(dfd, filename, user, group, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(filename, filename) __field(uid_t, user) __field(gid_t, group) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(filename, filename) tp_assign(user, user) tp_assign(group, group) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_linkat
++SC_TRACE_EVENT(sys_linkat,
++ TP_PROTO(int olddfd, const char * oldname, int newdfd, const char * newname, int flags),
++ TP_ARGS(olddfd, oldname, newdfd, newname, flags),
++ TP_STRUCT__entry(__field(int, olddfd) __string_from_user(oldname, oldname) __field(int, newdfd) __string_from_user(newname, newname) __field(int, flags)),
++ TP_fast_assign(tp_assign(olddfd, olddfd) tp_copy_string_from_user(oldname, oldname) tp_assign(newdfd, newdfd) tp_copy_string_from_user(newname, newname) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_ppoll
++SC_TRACE_EVENT(sys_ppoll,
++ TP_PROTO(struct pollfd * ufds, unsigned int nfds, struct timespec * tsp, const sigset_t * sigmask, size_t sigsetsize),
++ TP_ARGS(ufds, nfds, tsp, sigmask, sigsetsize),
++ TP_STRUCT__entry(__field_hex(struct pollfd *, ufds) __field(unsigned int, nfds) __field_hex(struct timespec *, tsp) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(ufds, ufds) tp_assign(nfds, nfds) tp_assign(tsp, tsp) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_preadv
++SC_TRACE_EVENT(sys_preadv,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
++ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_pwritev
++SC_TRACE_EVENT(sys_pwritev,
++ TP_PROTO(unsigned long fd, const struct iovec * vec, unsigned long vlen, unsigned long pos_l, unsigned long pos_h),
++ TP_ARGS(fd, vec, vlen, pos_l, pos_h),
++ TP_STRUCT__entry(__field(unsigned long, fd) __field_hex(const struct iovec *, vec) __field(unsigned long, vlen) __field(unsigned long, pos_l) __field(unsigned long, pos_h)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(vec, vec) tp_assign(vlen, vlen) tp_assign(pos_l, pos_l) tp_assign(pos_h, pos_h)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_perf_event_open
++SC_TRACE_EVENT(sys_perf_event_open,
++ TP_PROTO(struct perf_event_attr * attr_uptr, pid_t pid, int cpu, int group_fd, unsigned long flags),
++ TP_ARGS(attr_uptr, pid, cpu, group_fd, flags),
++ TP_STRUCT__entry(__field_hex(struct perf_event_attr *, attr_uptr) __field(pid_t, pid) __field(int, cpu) __field(int, group_fd) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(attr_uptr, attr_uptr) tp_assign(pid, pid) tp_assign(cpu, cpu) tp_assign(group_fd, group_fd) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_recvmmsg
++SC_TRACE_EVENT(sys_recvmmsg,
++ TP_PROTO(int fd, struct mmsghdr * mmsg, unsigned int vlen, unsigned int flags, struct timespec * timeout),
++ TP_ARGS(fd, mmsg, vlen, flags, timeout),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(struct mmsghdr *, mmsg) __field(unsigned int, vlen) __field(unsigned int, flags) __field_hex(struct timespec *, timeout)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(mmsg, mmsg) tp_assign(vlen, vlen) tp_assign(flags, flags) tp_assign(timeout, timeout)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_fanotify_mark
++SC_TRACE_EVENT(sys_fanotify_mark,
++ TP_PROTO(int fanotify_fd, unsigned int flags, __u64 mask, int dfd, const char * pathname),
++ TP_ARGS(fanotify_fd, flags, mask, dfd, pathname),
++ TP_STRUCT__entry(__field(int, fanotify_fd) __field(unsigned int, flags) __field(__u64, mask) __field(int, dfd) __string_from_user(pathname, pathname)),
++ TP_fast_assign(tp_assign(fanotify_fd, fanotify_fd) tp_assign(flags, flags) tp_assign(mask, mask) tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_name_to_handle_at
++SC_TRACE_EVENT(sys_name_to_handle_at,
++ TP_PROTO(int dfd, const char * name, struct file_handle * handle, int * mnt_id, int flag),
++ TP_ARGS(dfd, name, handle, mnt_id, flag),
++ TP_STRUCT__entry(__field(int, dfd) __string_from_user(name, name) __field_hex(struct file_handle *, handle) __field_hex(int *, mnt_id) __field(int, flag)),
++ TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(name, name) tp_assign(handle, handle) tp_assign(mnt_id, mnt_id) tp_assign(flag, flag)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_sendto
++SC_TRACE_EVENT(sys_sendto,
++ TP_PROTO(int fd, void * buff, size_t len, unsigned int flags, struct sockaddr * addr, int addr_len),
++ TP_ARGS(fd, buff, len, flags, addr, addr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, buff) __field(size_t, len) __field(unsigned int, flags) __field_hex(struct sockaddr *, addr) __field_hex(int, addr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(buff, buff) tp_assign(len, len) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_recvfrom
++SC_TRACE_EVENT(sys_recvfrom,
++ TP_PROTO(int fd, void * ubuf, size_t size, unsigned int flags, struct sockaddr * addr, int * addr_len),
++ TP_ARGS(fd, ubuf, size, flags, addr, addr_len),
++ TP_STRUCT__entry(__field(int, fd) __field_hex(void *, ubuf) __field(size_t, size) __field(unsigned int, flags) __field_hex(struct sockaddr *, addr) __field_hex(int *, addr_len)),
++ TP_fast_assign(tp_assign(fd, fd) tp_assign(ubuf, ubuf) tp_assign(size, size) tp_assign(flags, flags) tp_assign(addr, addr) tp_assign(addr_len, addr_len)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_futex
++SC_TRACE_EVENT(sys_futex,
++ TP_PROTO(u32 * uaddr, int op, u32 val, struct timespec * utime, u32 * uaddr2, u32 val3),
++ TP_ARGS(uaddr, op, val, utime, uaddr2, val3),
++ TP_STRUCT__entry(__field_hex(u32 *, uaddr) __field(int, op) __field(u32, val) __field_hex(struct timespec *, utime) __field_hex(u32 *, uaddr2) __field(u32, val3)),
++ TP_fast_assign(tp_assign(uaddr, uaddr) tp_assign(op, op) tp_assign(val, val) tp_assign(utime, utime) tp_assign(uaddr2, uaddr2) tp_assign(val3, val3)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_mbind
++SC_TRACE_EVENT(sys_mbind,
++ TP_PROTO(unsigned long start, unsigned long len, unsigned long mode, unsigned long * nmask, unsigned long maxnode, unsigned flags),
++ TP_ARGS(start, len, mode, nmask, maxnode, flags),
++ TP_STRUCT__entry(__field(unsigned long, start) __field(unsigned long, len) __field(unsigned long, mode) __field_hex(unsigned long *, nmask) __field(unsigned long, maxnode) __field(unsigned, flags)),
++ TP_fast_assign(tp_assign(start, start) tp_assign(len, len) tp_assign(mode, mode) tp_assign(nmask, nmask) tp_assign(maxnode, maxnode) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_pselect6
++SC_TRACE_EVENT(sys_pselect6,
++ TP_PROTO(int n, fd_set * inp, fd_set * outp, fd_set * exp, struct timespec * tsp, void * sig),
++ TP_ARGS(n, inp, outp, exp, tsp, sig),
++ TP_STRUCT__entry(__field(int, n) __field_hex(fd_set *, inp) __field_hex(fd_set *, outp) __field_hex(fd_set *, exp) __field_hex(struct timespec *, tsp) __field_hex(void *, sig)),
++ TP_fast_assign(tp_assign(n, n) tp_assign(inp, inp) tp_assign(outp, outp) tp_assign(exp, exp) tp_assign(tsp, tsp) tp_assign(sig, sig)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_splice
++SC_TRACE_EVENT(sys_splice,
++ TP_PROTO(int fd_in, loff_t * off_in, int fd_out, loff_t * off_out, size_t len, unsigned int flags),
++ TP_ARGS(fd_in, off_in, fd_out, off_out, len, flags),
++ TP_STRUCT__entry(__field(int, fd_in) __field_hex(loff_t *, off_in) __field(int, fd_out) __field_hex(loff_t *, off_out) __field(size_t, len) __field(unsigned int, flags)),
++ TP_fast_assign(tp_assign(fd_in, fd_in) tp_assign(off_in, off_in) tp_assign(fd_out, fd_out) tp_assign(off_out, off_out) tp_assign(len, len) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_move_pages
++SC_TRACE_EVENT(sys_move_pages,
++ TP_PROTO(pid_t pid, unsigned long nr_pages, const void * * pages, const int * nodes, int * status, int flags),
++ TP_ARGS(pid, nr_pages, pages, nodes, status, flags),
++ TP_STRUCT__entry(__field(pid_t, pid) __field(unsigned long, nr_pages) __field_hex(const void * *, pages) __field_hex(const int *, nodes) __field_hex(int *, status) __field(int, flags)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(nr_pages, nr_pages) tp_assign(pages, pages) tp_assign(nodes, nodes) tp_assign(status, status) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_epoll_pwait
++SC_TRACE_EVENT(sys_epoll_pwait,
++ TP_PROTO(int epfd, struct epoll_event * events, int maxevents, int timeout, const sigset_t * sigmask, size_t sigsetsize),
++ TP_ARGS(epfd, events, maxevents, timeout, sigmask, sigsetsize),
++ TP_STRUCT__entry(__field(int, epfd) __field_hex(struct epoll_event *, events) __field(int, maxevents) __field(int, timeout) __field_hex(const sigset_t *, sigmask) __field(size_t, sigsetsize)),
++ TP_fast_assign(tp_assign(epfd, epfd) tp_assign(events, events) tp_assign(maxevents, maxevents) tp_assign(timeout, timeout) tp_assign(sigmask, sigmask) tp_assign(sigsetsize, sigsetsize)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_process_vm_readv
++SC_TRACE_EVENT(sys_process_vm_readv,
++ TP_PROTO(pid_t pid, const struct iovec * lvec, unsigned long liovcnt, const struct iovec * rvec, unsigned long riovcnt, unsigned long flags),
++ TP_ARGS(pid, lvec, liovcnt, rvec, riovcnt, flags),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(const struct iovec *, lvec) __field(unsigned long, liovcnt) __field_hex(const struct iovec *, rvec) __field(unsigned long, riovcnt) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(lvec, lvec) tp_assign(liovcnt, liovcnt) tp_assign(rvec, rvec) tp_assign(riovcnt, riovcnt) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++#ifndef OVERRIDE_64_sys_process_vm_writev
++SC_TRACE_EVENT(sys_process_vm_writev,
++ TP_PROTO(pid_t pid, const struct iovec * lvec, unsigned long liovcnt, const struct iovec * rvec, unsigned long riovcnt, unsigned long flags),
++ TP_ARGS(pid, lvec, liovcnt, rvec, riovcnt, flags),
++ TP_STRUCT__entry(__field(pid_t, pid) __field_hex(const struct iovec *, lvec) __field(unsigned long, liovcnt) __field_hex(const struct iovec *, rvec) __field(unsigned long, riovcnt) __field(unsigned long, flags)),
++ TP_fast_assign(tp_assign(pid, pid) tp_assign(lvec, lvec) tp_assign(liovcnt, liovcnt) tp_assign(rvec, rvec) tp_assign(riovcnt, riovcnt) tp_assign(flags, flags)),
++ TP_printk()
++)
++#endif
++
++#endif /* _TRACE_SYSCALLS_POINTERS_H */
++
++/* This part must be outside protection */
++#include "../../../probes/define_trace.h"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include "x86-64-syscalls-3.10.0-rc7_pointers_override.h"
++#include "syscalls_pointers_override.h"
++
++#ifndef OVERRIDE_TABLE_64_sys_read
++TRACE_SYSCALL_TABLE(sys_read, sys_read, 0, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_write
++TRACE_SYSCALL_TABLE(sys_write, sys_write, 1, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_open
++TRACE_SYSCALL_TABLE(sys_open, sys_open, 2, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_newstat
++TRACE_SYSCALL_TABLE(sys_newstat, sys_newstat, 4, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_newfstat
++TRACE_SYSCALL_TABLE(sys_newfstat, sys_newfstat, 5, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_newlstat
++TRACE_SYSCALL_TABLE(sys_newlstat, sys_newlstat, 6, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_poll
++TRACE_SYSCALL_TABLE(sys_poll, sys_poll, 7, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigaction
++TRACE_SYSCALL_TABLE(sys_rt_sigaction, sys_rt_sigaction, 13, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigprocmask
++TRACE_SYSCALL_TABLE(sys_rt_sigprocmask, sys_rt_sigprocmask, 14, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pread64
++TRACE_SYSCALL_TABLE(sys_pread64, sys_pread64, 17, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pwrite64
++TRACE_SYSCALL_TABLE(sys_pwrite64, sys_pwrite64, 18, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_readv
++TRACE_SYSCALL_TABLE(sys_readv, sys_readv, 19, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_writev
++TRACE_SYSCALL_TABLE(sys_writev, sys_writev, 20, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_access
++TRACE_SYSCALL_TABLE(sys_access, sys_access, 21, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pipe
++TRACE_SYSCALL_TABLE(sys_pipe, sys_pipe, 22, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_select
++TRACE_SYSCALL_TABLE(sys_select, sys_select, 23, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mincore
++TRACE_SYSCALL_TABLE(sys_mincore, sys_mincore, 27, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_shmat
++TRACE_SYSCALL_TABLE(sys_shmat, sys_shmat, 30, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_shmctl
++TRACE_SYSCALL_TABLE(sys_shmctl, sys_shmctl, 31, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_nanosleep
++TRACE_SYSCALL_TABLE(sys_nanosleep, sys_nanosleep, 35, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getitimer
++TRACE_SYSCALL_TABLE(sys_getitimer, sys_getitimer, 36, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setitimer
++TRACE_SYSCALL_TABLE(sys_setitimer, sys_setitimer, 38, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sendfile64
++TRACE_SYSCALL_TABLE(sys_sendfile64, sys_sendfile64, 40, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_connect
++TRACE_SYSCALL_TABLE(sys_connect, sys_connect, 42, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_accept
++TRACE_SYSCALL_TABLE(sys_accept, sys_accept, 43, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sendto
++TRACE_SYSCALL_TABLE(sys_sendto, sys_sendto, 44, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_recvfrom
++TRACE_SYSCALL_TABLE(sys_recvfrom, sys_recvfrom, 45, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sendmsg
++TRACE_SYSCALL_TABLE(sys_sendmsg, sys_sendmsg, 46, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_recvmsg
++TRACE_SYSCALL_TABLE(sys_recvmsg, sys_recvmsg, 47, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_bind
++TRACE_SYSCALL_TABLE(sys_bind, sys_bind, 49, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getsockname
++TRACE_SYSCALL_TABLE(sys_getsockname, sys_getsockname, 51, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getpeername
++TRACE_SYSCALL_TABLE(sys_getpeername, sys_getpeername, 52, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_socketpair
++TRACE_SYSCALL_TABLE(sys_socketpair, sys_socketpair, 53, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setsockopt
++TRACE_SYSCALL_TABLE(sys_setsockopt, sys_setsockopt, 54, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getsockopt
++TRACE_SYSCALL_TABLE(sys_getsockopt, sys_getsockopt, 55, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_wait4
++TRACE_SYSCALL_TABLE(sys_wait4, sys_wait4, 61, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_newuname
++TRACE_SYSCALL_TABLE(sys_newuname, sys_newuname, 63, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_semop
++TRACE_SYSCALL_TABLE(sys_semop, sys_semop, 65, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_shmdt
++TRACE_SYSCALL_TABLE(sys_shmdt, sys_shmdt, 67, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_msgsnd
++TRACE_SYSCALL_TABLE(sys_msgsnd, sys_msgsnd, 69, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_msgrcv
++TRACE_SYSCALL_TABLE(sys_msgrcv, sys_msgrcv, 70, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_msgctl
++TRACE_SYSCALL_TABLE(sys_msgctl, sys_msgctl, 71, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_truncate
++TRACE_SYSCALL_TABLE(sys_truncate, sys_truncate, 76, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getdents
++TRACE_SYSCALL_TABLE(sys_getdents, sys_getdents, 78, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getcwd
++TRACE_SYSCALL_TABLE(sys_getcwd, sys_getcwd, 79, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_chdir
++TRACE_SYSCALL_TABLE(sys_chdir, sys_chdir, 80, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rename
++TRACE_SYSCALL_TABLE(sys_rename, sys_rename, 82, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mkdir
++TRACE_SYSCALL_TABLE(sys_mkdir, sys_mkdir, 83, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rmdir
++TRACE_SYSCALL_TABLE(sys_rmdir, sys_rmdir, 84, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_creat
++TRACE_SYSCALL_TABLE(sys_creat, sys_creat, 85, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_link
++TRACE_SYSCALL_TABLE(sys_link, sys_link, 86, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_unlink
++TRACE_SYSCALL_TABLE(sys_unlink, sys_unlink, 87, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_symlink
++TRACE_SYSCALL_TABLE(sys_symlink, sys_symlink, 88, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_readlink
++TRACE_SYSCALL_TABLE(sys_readlink, sys_readlink, 89, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_chmod
++TRACE_SYSCALL_TABLE(sys_chmod, sys_chmod, 90, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_chown
++TRACE_SYSCALL_TABLE(sys_chown, sys_chown, 92, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lchown
++TRACE_SYSCALL_TABLE(sys_lchown, sys_lchown, 94, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_gettimeofday
++TRACE_SYSCALL_TABLE(sys_gettimeofday, sys_gettimeofday, 96, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getrlimit
++TRACE_SYSCALL_TABLE(sys_getrlimit, sys_getrlimit, 97, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getrusage
++TRACE_SYSCALL_TABLE(sys_getrusage, sys_getrusage, 98, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sysinfo
++TRACE_SYSCALL_TABLE(sys_sysinfo, sys_sysinfo, 99, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_times
++TRACE_SYSCALL_TABLE(sys_times, sys_times, 100, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_syslog
++TRACE_SYSCALL_TABLE(sys_syslog, sys_syslog, 103, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getgroups
++TRACE_SYSCALL_TABLE(sys_getgroups, sys_getgroups, 115, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setgroups
++TRACE_SYSCALL_TABLE(sys_setgroups, sys_setgroups, 116, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getresuid
++TRACE_SYSCALL_TABLE(sys_getresuid, sys_getresuid, 118, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getresgid
++TRACE_SYSCALL_TABLE(sys_getresgid, sys_getresgid, 120, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigpending
++TRACE_SYSCALL_TABLE(sys_rt_sigpending, sys_rt_sigpending, 127, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigtimedwait
++TRACE_SYSCALL_TABLE(sys_rt_sigtimedwait, sys_rt_sigtimedwait, 128, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigqueueinfo
++TRACE_SYSCALL_TABLE(sys_rt_sigqueueinfo, sys_rt_sigqueueinfo, 129, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_sigsuspend
++TRACE_SYSCALL_TABLE(sys_rt_sigsuspend, sys_rt_sigsuspend, 130, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sigaltstack
++TRACE_SYSCALL_TABLE(sys_sigaltstack, sys_sigaltstack, 131, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_utime
++TRACE_SYSCALL_TABLE(sys_utime, sys_utime, 132, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mknod
++TRACE_SYSCALL_TABLE(sys_mknod, sys_mknod, 133, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ustat
++TRACE_SYSCALL_TABLE(sys_ustat, sys_ustat, 136, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_statfs
++TRACE_SYSCALL_TABLE(sys_statfs, sys_statfs, 137, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fstatfs
++TRACE_SYSCALL_TABLE(sys_fstatfs, sys_fstatfs, 138, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_setparam
++TRACE_SYSCALL_TABLE(sys_sched_setparam, sys_sched_setparam, 142, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_getparam
++TRACE_SYSCALL_TABLE(sys_sched_getparam, sys_sched_getparam, 143, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_setscheduler
++TRACE_SYSCALL_TABLE(sys_sched_setscheduler, sys_sched_setscheduler, 144, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_rr_get_interval
++TRACE_SYSCALL_TABLE(sys_sched_rr_get_interval, sys_sched_rr_get_interval, 148, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pivot_root
++TRACE_SYSCALL_TABLE(sys_pivot_root, sys_pivot_root, 155, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sysctl
++TRACE_SYSCALL_TABLE(sys_sysctl, sys_sysctl, 156, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_adjtimex
++TRACE_SYSCALL_TABLE(sys_adjtimex, sys_adjtimex, 159, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setrlimit
++TRACE_SYSCALL_TABLE(sys_setrlimit, sys_setrlimit, 160, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_chroot
++TRACE_SYSCALL_TABLE(sys_chroot, sys_chroot, 161, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_acct
++TRACE_SYSCALL_TABLE(sys_acct, sys_acct, 163, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_settimeofday
++TRACE_SYSCALL_TABLE(sys_settimeofday, sys_settimeofday, 164, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mount
++TRACE_SYSCALL_TABLE(sys_mount, sys_mount, 165, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_umount
++TRACE_SYSCALL_TABLE(sys_umount, sys_umount, 166, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_swapon
++TRACE_SYSCALL_TABLE(sys_swapon, sys_swapon, 167, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_swapoff
++TRACE_SYSCALL_TABLE(sys_swapoff, sys_swapoff, 168, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_reboot
++TRACE_SYSCALL_TABLE(sys_reboot, sys_reboot, 169, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sethostname
++TRACE_SYSCALL_TABLE(sys_sethostname, sys_sethostname, 170, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setdomainname
++TRACE_SYSCALL_TABLE(sys_setdomainname, sys_setdomainname, 171, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_init_module
++TRACE_SYSCALL_TABLE(sys_init_module, sys_init_module, 175, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_delete_module
++TRACE_SYSCALL_TABLE(sys_delete_module, sys_delete_module, 176, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_quotactl
++TRACE_SYSCALL_TABLE(sys_quotactl, sys_quotactl, 179, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_setxattr
++TRACE_SYSCALL_TABLE(sys_setxattr, sys_setxattr, 188, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lsetxattr
++TRACE_SYSCALL_TABLE(sys_lsetxattr, sys_lsetxattr, 189, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fsetxattr
++TRACE_SYSCALL_TABLE(sys_fsetxattr, sys_fsetxattr, 190, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getxattr
++TRACE_SYSCALL_TABLE(sys_getxattr, sys_getxattr, 191, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lgetxattr
++TRACE_SYSCALL_TABLE(sys_lgetxattr, sys_lgetxattr, 192, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fgetxattr
++TRACE_SYSCALL_TABLE(sys_fgetxattr, sys_fgetxattr, 193, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_listxattr
++TRACE_SYSCALL_TABLE(sys_listxattr, sys_listxattr, 194, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_llistxattr
++TRACE_SYSCALL_TABLE(sys_llistxattr, sys_llistxattr, 195, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_flistxattr
++TRACE_SYSCALL_TABLE(sys_flistxattr, sys_flistxattr, 196, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_removexattr
++TRACE_SYSCALL_TABLE(sys_removexattr, sys_removexattr, 197, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lremovexattr
++TRACE_SYSCALL_TABLE(sys_lremovexattr, sys_lremovexattr, 198, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fremovexattr
++TRACE_SYSCALL_TABLE(sys_fremovexattr, sys_fremovexattr, 199, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_time
++TRACE_SYSCALL_TABLE(sys_time, sys_time, 201, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_futex
++TRACE_SYSCALL_TABLE(sys_futex, sys_futex, 202, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_setaffinity
++TRACE_SYSCALL_TABLE(sys_sched_setaffinity, sys_sched_setaffinity, 203, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sched_getaffinity
++TRACE_SYSCALL_TABLE(sys_sched_getaffinity, sys_sched_getaffinity, 204, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_io_setup
++TRACE_SYSCALL_TABLE(sys_io_setup, sys_io_setup, 206, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_io_getevents
++TRACE_SYSCALL_TABLE(sys_io_getevents, sys_io_getevents, 208, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_io_submit
++TRACE_SYSCALL_TABLE(sys_io_submit, sys_io_submit, 209, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_io_cancel
++TRACE_SYSCALL_TABLE(sys_io_cancel, sys_io_cancel, 210, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_lookup_dcookie
++TRACE_SYSCALL_TABLE(sys_lookup_dcookie, sys_lookup_dcookie, 212, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getdents64
++TRACE_SYSCALL_TABLE(sys_getdents64, sys_getdents64, 217, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_set_tid_address
++TRACE_SYSCALL_TABLE(sys_set_tid_address, sys_set_tid_address, 218, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_semtimedop
++TRACE_SYSCALL_TABLE(sys_semtimedop, sys_semtimedop, 220, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timer_create
++TRACE_SYSCALL_TABLE(sys_timer_create, sys_timer_create, 222, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timer_settime
++TRACE_SYSCALL_TABLE(sys_timer_settime, sys_timer_settime, 223, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timer_gettime
++TRACE_SYSCALL_TABLE(sys_timer_gettime, sys_timer_gettime, 224, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_clock_settime
++TRACE_SYSCALL_TABLE(sys_clock_settime, sys_clock_settime, 227, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_clock_gettime
++TRACE_SYSCALL_TABLE(sys_clock_gettime, sys_clock_gettime, 228, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_clock_getres
++TRACE_SYSCALL_TABLE(sys_clock_getres, sys_clock_getres, 229, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_clock_nanosleep
++TRACE_SYSCALL_TABLE(sys_clock_nanosleep, sys_clock_nanosleep, 230, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_epoll_wait
++TRACE_SYSCALL_TABLE(sys_epoll_wait, sys_epoll_wait, 232, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_epoll_ctl
++TRACE_SYSCALL_TABLE(sys_epoll_ctl, sys_epoll_ctl, 233, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_utimes
++TRACE_SYSCALL_TABLE(sys_utimes, sys_utimes, 235, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mbind
++TRACE_SYSCALL_TABLE(sys_mbind, sys_mbind, 237, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_set_mempolicy
++TRACE_SYSCALL_TABLE(sys_set_mempolicy, sys_set_mempolicy, 238, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_get_mempolicy
++TRACE_SYSCALL_TABLE(sys_get_mempolicy, sys_get_mempolicy, 239, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mq_open
++TRACE_SYSCALL_TABLE(sys_mq_open, sys_mq_open, 240, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mq_unlink
++TRACE_SYSCALL_TABLE(sys_mq_unlink, sys_mq_unlink, 241, 1)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mq_timedsend
++TRACE_SYSCALL_TABLE(sys_mq_timedsend, sys_mq_timedsend, 242, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mq_timedreceive
++TRACE_SYSCALL_TABLE(sys_mq_timedreceive, sys_mq_timedreceive, 243, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mq_notify
++TRACE_SYSCALL_TABLE(sys_mq_notify, sys_mq_notify, 244, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mq_getsetattr
++TRACE_SYSCALL_TABLE(sys_mq_getsetattr, sys_mq_getsetattr, 245, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_kexec_load
++TRACE_SYSCALL_TABLE(sys_kexec_load, sys_kexec_load, 246, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_waitid
++TRACE_SYSCALL_TABLE(sys_waitid, sys_waitid, 247, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_add_key
++TRACE_SYSCALL_TABLE(sys_add_key, sys_add_key, 248, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_request_key
++TRACE_SYSCALL_TABLE(sys_request_key, sys_request_key, 249, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_inotify_add_watch
++TRACE_SYSCALL_TABLE(sys_inotify_add_watch, sys_inotify_add_watch, 254, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_migrate_pages
++TRACE_SYSCALL_TABLE(sys_migrate_pages, sys_migrate_pages, 256, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_openat
++TRACE_SYSCALL_TABLE(sys_openat, sys_openat, 257, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mkdirat
++TRACE_SYSCALL_TABLE(sys_mkdirat, sys_mkdirat, 258, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_mknodat
++TRACE_SYSCALL_TABLE(sys_mknodat, sys_mknodat, 259, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fchownat
++TRACE_SYSCALL_TABLE(sys_fchownat, sys_fchownat, 260, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_futimesat
++TRACE_SYSCALL_TABLE(sys_futimesat, sys_futimesat, 261, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_newfstatat
++TRACE_SYSCALL_TABLE(sys_newfstatat, sys_newfstatat, 262, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_unlinkat
++TRACE_SYSCALL_TABLE(sys_unlinkat, sys_unlinkat, 263, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_renameat
++TRACE_SYSCALL_TABLE(sys_renameat, sys_renameat, 264, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_linkat
++TRACE_SYSCALL_TABLE(sys_linkat, sys_linkat, 265, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_symlinkat
++TRACE_SYSCALL_TABLE(sys_symlinkat, sys_symlinkat, 266, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_readlinkat
++TRACE_SYSCALL_TABLE(sys_readlinkat, sys_readlinkat, 267, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fchmodat
++TRACE_SYSCALL_TABLE(sys_fchmodat, sys_fchmodat, 268, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_faccessat
++TRACE_SYSCALL_TABLE(sys_faccessat, sys_faccessat, 269, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pselect6
++TRACE_SYSCALL_TABLE(sys_pselect6, sys_pselect6, 270, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_ppoll
++TRACE_SYSCALL_TABLE(sys_ppoll, sys_ppoll, 271, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_set_robust_list
++TRACE_SYSCALL_TABLE(sys_set_robust_list, sys_set_robust_list, 273, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_get_robust_list
++TRACE_SYSCALL_TABLE(sys_get_robust_list, sys_get_robust_list, 274, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_splice
++TRACE_SYSCALL_TABLE(sys_splice, sys_splice, 275, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_vmsplice
++TRACE_SYSCALL_TABLE(sys_vmsplice, sys_vmsplice, 278, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_move_pages
++TRACE_SYSCALL_TABLE(sys_move_pages, sys_move_pages, 279, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_utimensat
++TRACE_SYSCALL_TABLE(sys_utimensat, sys_utimensat, 280, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_epoll_pwait
++TRACE_SYSCALL_TABLE(sys_epoll_pwait, sys_epoll_pwait, 281, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_signalfd
++TRACE_SYSCALL_TABLE(sys_signalfd, sys_signalfd, 282, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timerfd_settime
++TRACE_SYSCALL_TABLE(sys_timerfd_settime, sys_timerfd_settime, 286, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_timerfd_gettime
++TRACE_SYSCALL_TABLE(sys_timerfd_gettime, sys_timerfd_gettime, 287, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_accept4
++TRACE_SYSCALL_TABLE(sys_accept4, sys_accept4, 288, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_signalfd4
++TRACE_SYSCALL_TABLE(sys_signalfd4, sys_signalfd4, 289, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pipe2
++TRACE_SYSCALL_TABLE(sys_pipe2, sys_pipe2, 293, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_preadv
++TRACE_SYSCALL_TABLE(sys_preadv, sys_preadv, 295, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_pwritev
++TRACE_SYSCALL_TABLE(sys_pwritev, sys_pwritev, 296, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_rt_tgsigqueueinfo
++TRACE_SYSCALL_TABLE(sys_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, 297, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_perf_event_open
++TRACE_SYSCALL_TABLE(sys_perf_event_open, sys_perf_event_open, 298, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_recvmmsg
++TRACE_SYSCALL_TABLE(sys_recvmmsg, sys_recvmmsg, 299, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_fanotify_mark
++TRACE_SYSCALL_TABLE(sys_fanotify_mark, sys_fanotify_mark, 301, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_prlimit64
++TRACE_SYSCALL_TABLE(sys_prlimit64, sys_prlimit64, 302, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_name_to_handle_at
++TRACE_SYSCALL_TABLE(sys_name_to_handle_at, sys_name_to_handle_at, 303, 5)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_open_by_handle_at
++TRACE_SYSCALL_TABLE(sys_open_by_handle_at, sys_open_by_handle_at, 304, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_clock_adjtime
++TRACE_SYSCALL_TABLE(sys_clock_adjtime, sys_clock_adjtime, 305, 2)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_sendmmsg
++TRACE_SYSCALL_TABLE(sys_sendmmsg, sys_sendmmsg, 307, 4)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_getcpu
++TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 309, 3)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_process_vm_readv
++TRACE_SYSCALL_TABLE(sys_process_vm_readv, sys_process_vm_readv, 310, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_process_vm_writev
++TRACE_SYSCALL_TABLE(sys_process_vm_writev, sys_process_vm_writev, 311, 6)
++#endif
++#ifndef OVERRIDE_TABLE_64_sys_finit_module
++TRACE_SYSCALL_TABLE(sys_finit_module, sys_finit_module, 313, 3)
++#endif
++
++#endif /* CREATE_SYSCALL_TABLE */
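Each TRACE_SYSCALL_TABLE(template, name, nr, nargs) line above records the probe template, the event name, the x86-64 syscall number and the argument count. A minimal sketch of how a consumer could expand the generated header added above into a table indexed by syscall number; the struct and its fields are illustrative assumptions, not the layout used by the LTTng tracer itself:

struct trace_syscall_entry {
	const char *name;	/* e.g. "sys_openat" */
	unsigned int nrargs;	/* argument count recorded in the table */
};

#define CREATE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
	[ _nr ] = { .name = #_name, .nrargs = (_nrargs) },

static const struct trace_syscall_entry sc_table[] = {
#include "x86-64-syscalls-3.10.0-rc7_pointers.h"
};
/* sc_table[257] is then { "sys_openat", 4 }, matching the entry above. */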
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.10.0-rc7_pointers_override.h
+@@ -0,0 +1,12 @@
++#ifndef CREATE_SYSCALL_TABLE
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#define OVERRIDE_TABLE_64_sys_clone
++TRACE_SYSCALL_TABLE(sys_clone, sys_clone, 56, 5)
++#define OVERRIDE_TABLE_64_sys_execve
++TRACE_SYSCALL_TABLE(sys_execve, sys_execve, 59, 3)
++#define OVERRIDE_TABLE_64_sys_getcpu
++TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 309, 3)
++
++#endif /* CREATE_SYSCALL_TABLE */
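The override header is included ahead of the generated entries (see the #include lines emitted by lttng-syscalls-generate-headers.sh further down in this patch), so each OVERRIDE_TABLE_64_* define suppresses the matching auto-generated entry and the hand-written one is used instead. A minimal sketch of the guard interplay, using sys_clone from the entries above:

#define OVERRIDE_TABLE_64_sys_clone			/* from the override header, included first */

#ifndef OVERRIDE_TABLE_64_sys_clone			/* guard emitted by the generator */
TRACE_SYSCALL_TABLE(sys_clone, sys_clone, 56, 5)	/* skipped because the guard is defined */
#endif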
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/Makefile
+@@ -0,0 +1 @@
++obj-m += lttng-syscalls-extractor.o
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c
+@@ -0,0 +1,100 @@
++/*
++ * lttng-syscalls-extractor.c
++ *
++ * Dump syscall metadata to console.
++ *
++ * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/kallsyms.h>
++#include <linux/dcache.h>
++#include <linux/ftrace_event.h>
++#include <trace/syscall.h>
++#include <asm/syscall.h>
++
++#ifndef CONFIG_FTRACE_SYSCALLS
++#error "You need to set CONFIG_FTRACE_SYSCALLS=y"
++#endif
++
++#ifndef CONFIG_KALLSYMS_ALL
++#error "You need to set CONFIG_KALLSYMS_ALL=y"
++#endif
++
++static struct syscall_metadata **__start_syscalls_metadata;
++static struct syscall_metadata **__stop_syscalls_metadata;
++
++static __init
++struct syscall_metadata *find_syscall_meta(unsigned long syscall)
++{
++ struct syscall_metadata **iter;
++
++ for (iter = __start_syscalls_metadata;
++ iter < __stop_syscalls_metadata; iter++) {
++ if ((*iter)->syscall_nr == syscall)
++ return (*iter);
++ }
++ return NULL;
++}
++
++int init_module(void)
++{
++ struct syscall_metadata *meta;
++ int i;
++
++ __start_syscalls_metadata = (void *) kallsyms_lookup_name("__start_syscalls_metadata");
++ __stop_syscalls_metadata = (void *) kallsyms_lookup_name("__stop_syscalls_metadata");
++
++ for (i = 0; i < NR_syscalls; i++) {
++ int j;
++
++ meta = find_syscall_meta(i);
++ if (!meta)
++ continue;
++ printk("syscall %s nr %d nbargs %d ",
++ meta->name, meta->syscall_nr, meta->nb_args);
++ printk("types: (");
++ for (j = 0; j < meta->nb_args; j++) {
++ if (j > 0)
++ printk(", ");
++ printk("%s", meta->types[j]);
++ }
++ printk(") ");
++ printk("args: (");
++ for (j = 0; j < meta->nb_args; j++) {
++ if (j > 0)
++ printk(", ");
++ printk("%s", meta->args[j]);
++ }
++ printk(")\n");
++ }
++ printk("SUCCESS\n");
++
++ return -1;
++}
++
++void cleanup_module(void)
++{
++}
++
++MODULE_LICENSE("GPL");
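The module walks the NR_syscalls range, prints one metadata line per instrumented syscall plus a final SUCCESS marker, and deliberately returns -1 from init_module() so it is never left loaded; the console dump is its only purpose and is the input of lttng-syscalls-generate-headers.sh below. A rough sketch of the output, with the exact type strings and the presence of the sys_ prefix depending on the kernel build:

/*
 * Approximate console output (one line per instrumented syscall):
 *
 *   syscall sys_mkdirat nr 258 nbargs 3 types: (int, const char *, umode_t) args: (dfd, pathname, mode)
 *   ...
 *   SUCCESS
 */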
+--- /dev/null
++++ b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh
+@@ -0,0 +1,279 @@
++#!/bin/sh
++
++# Generate system call probe description macros from syscall metadata dump file.
++# The resulting header will be written in the headers subdirectory, in a file name
++# based on the name of the input file.
++#
++# example usage:
++#
++# lttng-syscalls-generate-headers.sh <type> <input_dir> <input_filename_in_dir> <bitness>
++# lttng-syscalls-generate-headers.sh integers 3.0.4 x86-64-syscalls-3.0.4 64
++# lttng-syscalls-generate-headers.sh pointers 3.0.4 x86-64-syscalls-3.0.4 64
++
++CLASS=$1
++INPUTDIR=$2
++INPUTFILE=$3
++BITNESS=$4
++INPUT=${INPUTDIR}/${INPUTFILE}
++SRCFILE=gen.tmp.0
++TMPFILE=gen.tmp.1
++HEADER=headers/${INPUTFILE}_${CLASS}.h
++
++cp ${INPUT} ${SRCFILE}
++
++#Cleanup
++perl -p -e 's/^\[.*\] //g' ${SRCFILE} > ${TMPFILE}
++mv ${TMPFILE} ${SRCFILE}
++
++perl -p -e 's/^syscall sys_([^ ]*)/syscall $1/g' ${SRCFILE} > ${TMPFILE}
++mv ${TMPFILE} ${SRCFILE}
++
++#Filter
++
++if [ "$CLASS" = integers ]; then
++ #select integers and no-args.
++ CLASSCAP=INTEGERS
++ grep -v "\\*\|cap_user_header_t" ${SRCFILE} > ${TMPFILE}
++ mv ${TMPFILE} ${SRCFILE}
++fi
++
++
++if [ "$CLASS" = pointers ]; then
++ #select system calls using pointers.
++ CLASSCAP=POINTERS
++	grep "\\*\|cap_user_header_t" ${SRCFILE} > ${TMPFILE}
++ mv ${TMPFILE} ${SRCFILE}
++fi
++
++echo "/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */" > ${HEADER}
++
++echo \
++"#ifndef CREATE_SYSCALL_TABLE
++
++#if !defined(_TRACE_SYSCALLS_${CLASSCAP}_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_SYSCALLS_${CLASSCAP}_H
++
++#include <linux/tracepoint.h>
++#include <linux/syscalls.h>
++#include \"${INPUTFILE}_${CLASS}_override.h\"
++#include \"syscalls_${CLASS}_override.h\"
++" >> ${HEADER}
++
++if [ "$CLASS" = integers ]; then
++
++NRARGS=0
++
++printf \
++'SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,\n'\
++' TP_STRUCT__entry(),\n'\
++' TP_fast_assign(),\n'\
++' TP_printk()\n'\
++')'\
++ >> ${HEADER}
++
++grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
++perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
++'types: \(([^)]*)\) '\
++'args: \(([^)]*)\)/'\
++'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
++'SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_$1)\n'\
++'#endif/g'\
++ ${TMPFILE} >> ${HEADER}
++
++fi
++
++
++# types: 4
++# args 5
++
++NRARGS=1
++grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
++perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
++'types: \(([^)]*)\) '\
++'args: \(([^)]*)\)/'\
++'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
++'SC_TRACE_EVENT(sys_$1,\n'\
++' TP_PROTO($4 $5),\n'\
++' TP_ARGS($5),\n'\
++' TP_STRUCT__entry(__field($4, $5)),\n'\
++' TP_fast_assign(tp_assign($4, $5, $5)),\n'\
++' TP_printk()\n'\
++')\n'\
++'#endif/g'\
++ ${TMPFILE} >> ${HEADER}
++
++# types: 4 5
++# args 6 7
++
++NRARGS=2
++grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
++perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
++'types: \(([^,]*), ([^)]*)\) '\
++'args: \(([^,]*), ([^)]*)\)/'\
++'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
++'SC_TRACE_EVENT(sys_$1,\n'\
++' TP_PROTO($4 $6, $5 $7),\n'\
++' TP_ARGS($6, $7),\n'\
++' TP_STRUCT__entry(__field($4, $6) __field($5, $7)),\n'\
++' TP_fast_assign(tp_assign($4, $6, $6) tp_assign($5, $7, $7)),\n'\
++' TP_printk()\n'\
++')\n'\
++'#endif/g'\
++ ${TMPFILE} >> ${HEADER}
++
++# types: 4 5 6
++# args 7 8 9
++
++NRARGS=3
++grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
++perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
++'types: \(([^,]*), ([^,]*), ([^)]*)\) '\
++'args: \(([^,]*), ([^,]*), ([^)]*)\)/'\
++'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
++'SC_TRACE_EVENT(sys_$1,\n'\
++' TP_PROTO($4 $7, $5 $8, $6 $9),\n'\
++' TP_ARGS($7, $8, $9),\n'\
++' TP_STRUCT__entry(__field($4, $7) __field($5, $8) __field($6, $9)),\n'\
++' TP_fast_assign(tp_assign($4, $7, $7) tp_assign($5, $8, $8) tp_assign($6, $9, $9)),\n'\
++' TP_printk()\n'\
++')\n'\
++'#endif/g'\
++ ${TMPFILE} >> ${HEADER}
++
++
++# types: 4 5 6 7
++# args 8 9 10 11
++
++NRARGS=4
++grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
++perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
++'types: \(([^,]*), ([^,]*), ([^,]*), ([^)]*)\) '\
++'args: \(([^,]*), ([^,]*), ([^,]*), ([^)]*)\)/'\
++'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
++'SC_TRACE_EVENT(sys_$1,\n'\
++' TP_PROTO($4 $8, $5 $9, $6 $10, $7 $11),\n'\
++' TP_ARGS($8, $9, $10, $11),\n'\
++' TP_STRUCT__entry(__field($4, $8) __field($5, $9) __field($6, $10) __field($7, $11)),\n'\
++' TP_fast_assign(tp_assign($4, $8, $8) tp_assign($5, $9, $9) tp_assign($6, $10, $10) tp_assign($7, $11, $11)),\n'\
++' TP_printk()\n'\
++')\n'\
++'#endif/g'\
++ ${TMPFILE} >> ${HEADER}
++
++# types: 4 5 6 7 8
++# args 9 10 11 12 13
++
++NRARGS=5
++grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
++perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
++'types: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^)]*)\) '\
++'args: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^)]*)\)/'\
++'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
++'SC_TRACE_EVENT(sys_$1,\n'\
++' TP_PROTO($4 $9, $5 $10, $6 $11, $7 $12, $8 $13),\n'\
++' TP_ARGS($9, $10, $11, $12, $13),\n'\
++' TP_STRUCT__entry(__field($4, $9) __field($5, $10) __field($6, $11) __field($7, $12) __field($8, $13)),\n'\
++' TP_fast_assign(tp_assign($4, $9, $9) tp_assign($5, $10, $10) tp_assign($6, $11, $11) tp_assign($7, $12, $12) tp_assign($8, $13, $13)),\n'\
++' TP_printk()\n'\
++')\n'\
++'#endif/g'\
++ ${TMPFILE} >> ${HEADER}
++
++
++# types: 4 5 6 7 8 9
++# args 10 11 12 13 14 15
++
++NRARGS=6
++grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
++perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
++'types: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^\)]*)\) '\
++'args: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^\)]*)\)/'\
++'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
++'SC_TRACE_EVENT(sys_$1,\n'\
++' TP_PROTO($4 $10, $5 $11, $6 $12, $7 $13, $8 $14, $9 $15),\n'\
++' TP_ARGS($10, $11, $12, $13, $14, $15),\n'\
++' TP_STRUCT__entry(__field($4, $10) __field($5, $11) __field($6, $12) __field($7, $13) __field($8, $14) __field($9, $15)),\n'\
++' TP_fast_assign(tp_assign($4, $10, $10) tp_assign($5, $11, $11) tp_assign($6, $12, $12) tp_assign($7, $13, $13) tp_assign($8, $14, $14) tp_assign($9, $15, $15)),\n'\
++' TP_printk()\n'\
++')\n'\
++'#endif/g'\
++ ${TMPFILE} >> ${HEADER}
++
++# Macro for tracing syscall table
++
++rm -f ${TMPFILE}
++for NRARGS in $(seq 0 6); do
++ grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} >> ${TMPFILE}
++done
++
++echo \
++"
++#endif /* _TRACE_SYSCALLS_${CLASSCAP}_H */
++
++/* This part must be outside protection */
++#include \"../../../probes/define_trace.h\"
++
++#else /* CREATE_SYSCALL_TABLE */
++
++#include \"${INPUTFILE}_${CLASS}_override.h\"
++#include \"syscalls_${CLASS}_override.h\"
++" >> ${HEADER}
++
++NRARGS=0
++
++if [ "$CLASS" = integers ]; then
++#noargs
++grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
++perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) .*$/'\
++'#ifndef OVERRIDE_TABLE_'"${BITNESS}"'_sys_$1\n'\
++'TRACE_SYSCALL_TABLE\(syscalls_noargs, sys_$1, $2, $3\)\n'\
++'#endif/g'\
++ ${TMPFILE} >> ${HEADER}
++fi
++
++#others.
++grep -v "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
++perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) .*$/'\
++'#ifndef OVERRIDE_TABLE_'"${BITNESS}"'_sys_$1\n'\
++'TRACE_SYSCALL_TABLE(sys_$1, sys_$1, $2, $3)\n'\
++'#endif/g'\
++ ${TMPFILE} >> ${HEADER}
++
++echo -n \
++"
++#endif /* CREATE_SYSCALL_TABLE */
++" >> ${HEADER}
++
++#field names: ...char * type with *name* or *file* or *path* or *root*
++# or *put_old* or *type*
++cp -f ${HEADER} ${TMPFILE}
++rm -f ${HEADER}
++perl -p -e 's/__field\(([^,)]*char \*), ([^\)]*)(name|file|path|root|put_old|type)([^\)]*)\)/__string_from_user($2$3$4, $2$3$4)/g'\
++ ${TMPFILE} >> ${HEADER}
++cp -f ${HEADER} ${TMPFILE}
++rm -f ${HEADER}
++perl -p -e 's/tp_assign\(([^,)]*char \*), ([^,]*)(name|file|path|root|put_old|type)([^,]*), ([^\)]*)\)/tp_copy_string_from_user($2$3$4, $5)/g'\
++ ${TMPFILE} >> ${HEADER}
++
++#prettify addresses heuristics.
++#field names with addr or ptr
++cp -f ${HEADER} ${TMPFILE}
++rm -f ${HEADER}
++perl -p -e 's/__field\(([^,)]*), ([^,)]*addr|[^,)]*ptr)([^),]*)\)/__field_hex($1, $2$3)/g'\
++ ${TMPFILE} >> ${HEADER}
++
++#field types ending with '*'
++cp -f ${HEADER} ${TMPFILE}
++rm -f ${HEADER}
++perl -p -e 's/__field\(([^,)]*\*), ([^),]*)\)/__field_hex($1, $2)/g'\
++ ${TMPFILE} >> ${HEADER}
++
++#strip the extra type information from tp_assign.
++cp -f ${HEADER} ${TMPFILE}
++rm -f ${HEADER}
++perl -p -e 's/tp_assign\(([^,)]*), ([^,]*), ([^\)]*)\)/tp_assign($2, $3)/g'\
++ ${TMPFILE} >> ${HEADER}
++
++rm -f ${INPUTFILE}.tmp
++rm -f ${TMPFILE}
++rm -f ${SRCFILE}
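For one metadata line, the NRARGS=3 rule plus the post-processing passes (user-string copy for char * fields whose name contains name/file/path/etc., hex formatting for pointer fields, type stripping in tp_assign) produce roughly the following, shown here for sys_mkdirat as an illustration; the exact types come from the extractor dump:

#ifndef OVERRIDE_64_sys_mkdirat
SC_TRACE_EVENT(sys_mkdirat,
	TP_PROTO(int dfd, const char * pathname, umode_t mode),
	TP_ARGS(dfd, pathname, mode),
	TP_STRUCT__entry(__field(int, dfd) __string_from_user(pathname, pathname) __field(umode_t, mode)),
	TP_fast_assign(tp_assign(dfd, dfd) tp_copy_string_from_user(pathname, pathname) tp_assign(mode, mode)),
	TP_printk()
)
#endif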
+--- /dev/null
++++ b/drivers/staging/lttng/lib/Makefile
+@@ -0,0 +1,11 @@
++obj-m += lttng-lib-ring-buffer.o
++
++lttng-lib-ring-buffer-objs := \
++ ringbuffer/ring_buffer_backend.o \
++ ringbuffer/ring_buffer_frontend.o \
++ ringbuffer/ring_buffer_iterator.o \
++ ringbuffer/ring_buffer_vfs.o \
++ ringbuffer/ring_buffer_splice.o \
++ ringbuffer/ring_buffer_mmap.o \
++ prio_heap/lttng_prio_heap.o \
++ ../wrapper/splice.o
+--- /dev/null
++++ b/drivers/staging/lttng/lib/align.h
+@@ -0,0 +1,73 @@
++#ifndef _LTTNG_ALIGN_H
++#define _LTTNG_ALIGN_H
++
++/*
++ * lib/align.h
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifdef __KERNEL__
++
++#include <linux/types.h>
++#include "bug.h"
++
++#define ALIGN_FLOOR(x, a) __ALIGN_FLOOR_MASK(x, (typeof(x)) (a) - 1)
++#define __ALIGN_FLOOR_MASK(x, mask) ((x) & ~(mask))
++#define PTR_ALIGN_FLOOR(p, a) \
++ ((typeof(p)) ALIGN_FLOOR((unsigned long) (p), a))
++
++/*
++ * Align pointer on natural object alignment.
++ */
++#define object_align(obj) PTR_ALIGN(obj, __alignof__(*(obj)))
++#define object_align_floor(obj) PTR_ALIGN_FLOOR(obj, __alignof__(*(obj)))
++
++/**
++ * offset_align - Calculate the offset needed to align an object on its natural
++ * alignment towards higher addresses.
++ * @align_drift: object offset from an "alignment"-aligned address.
++ * @alignment: natural object alignment. Must be non-zero, power of 2.
++ *
++ * Returns the offset that must be added to align towards higher
++ * addresses.
++ */
++#define offset_align(align_drift, alignment) \
++ ({ \
++ BUILD_RUNTIME_BUG_ON((alignment) == 0 \
++ || ((alignment) & ((alignment) - 1))); \
++ (((alignment) - (align_drift)) & ((alignment) - 1)); \
++ })
++
++/**
++ * offset_align_floor - Calculate the offset needed to align an object
++ * on its natural alignment towards lower addresses.
++ * @align_drift: object offset from an "alignment"-aligned address.
++ * @alignment: natural object alignment. Must be non-zero, power of 2.
++ *
++ * Returns the offset that must be subtracted to align towards lower addresses.
++ */
++#define offset_align_floor(align_drift, alignment) \
++ ({ \
++ BUILD_RUNTIME_BUG_ON((alignment) == 0 \
++ || ((alignment) & ((alignment) - 1))); \
++		(((align_drift) - (alignment)) & ((alignment) - 1));	\
++ })
++
++#endif /* __KERNEL__ */
++
++#endif
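A minimal usage sketch for offset_align(), assuming <linux/types.h> for u64: padding a stream offset to the natural alignment of the next field before reserving space for it.

static inline size_t reserve_u64(size_t offset)
{
	/* e.g. offset == 5, __alignof__(u64) == 8: adds 3 bytes of padding */
	offset += offset_align(offset, __alignof__(u64));
	return offset + sizeof(u64);	/* space for the aligned payload */
}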
+--- /dev/null
++++ b/drivers/staging/lttng/lib/bitfield.h
+@@ -0,0 +1,408 @@
++#ifndef _BABELTRACE_BITFIELD_H
++#define _BABELTRACE_BITFIELD_H
++
++/*
++ * BabelTrace
++ *
++ * Bitfields read/write functions.
++ *
++ * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to deal
++ * in the Software without restriction, including without limitation the rights
++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++ * copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++#include "../lttng-endian.h"
++
++#ifndef CHAR_BIT
++#define CHAR_BIT 8
++#endif
++
++/* We can't shift an int by 32 bits: >> 32 and << 32 on an int are undefined */
++#define _bt_piecewise_rshift(_v, _shift) \
++({ \
++ typeof(_v) ___v = (_v); \
++ typeof(_shift) ___shift = (_shift); \
++ unsigned long sb = (___shift) / (sizeof(___v) * CHAR_BIT - 1); \
++ unsigned long final = (___shift) % (sizeof(___v) * CHAR_BIT - 1); \
++ \
++ for (; sb; sb--) \
++ ___v >>= sizeof(___v) * CHAR_BIT - 1; \
++ ___v >>= final; \
++})
++
++#define _bt_piecewise_lshift(_v, _shift) \
++({ \
++ typeof(_v) ___v = (_v); \
++ typeof(_shift) ___shift = (_shift); \
++ unsigned long sb = (___shift) / (sizeof(___v) * CHAR_BIT - 1); \
++ unsigned long final = (___shift) % (sizeof(___v) * CHAR_BIT - 1); \
++ \
++ for (; sb; sb--) \
++ ___v <<= sizeof(___v) * CHAR_BIT - 1; \
++ ___v <<= final; \
++})
++
++#define _bt_is_signed_type(type) (((type)(-1)) < 0)
++
++#define _bt_unsigned_cast(type, v) \
++({ \
++ (sizeof(v) < sizeof(type)) ? \
++ ((type) (v)) & (~(~(type) 0 << (sizeof(v) * CHAR_BIT))) : \
++ (type) (v); \
++})
++
++/*
++ * bt_bitfield_write - write integer to a bitfield in native endianness
++ *
++ * Save an integer to the bitfield, which starts at the "start" bit and is
++ * "len" bits long.
++ * The inside of a bitfield is from high bits to low bits.
++ * Uses native endianness.
++ * For unsigned "v", pad MSB with 0 if bitfield is larger than v.
++ * For signed "v", sign-extend v if bitfield is larger than v.
++ *
++ * On little endian, bytes are placed from the least significant to the most
++ * significant. Also, consecutive bitfields are placed from lower bits to higher
++ * bits.
++ *
++ * On big endian, bytes are placed from most significant to least significant.
++ * Also, consecutive bitfields are placed from higher to lower bits.
++ */
++
++#define _bt_bitfield_write_le(_ptr, type, _start, _length, _v) \
++do { \
++ typeof(_v) __v = (_v); \
++ type *__ptr = (void *) (_ptr); \
++ unsigned long __start = (_start), __length = (_length); \
++ type mask, cmask; \
++ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
++ unsigned long start_unit, end_unit, this_unit; \
++ unsigned long end, cshift; /* cshift is "complement shift" */ \
++ \
++ if (!__length) \
++ break; \
++ \
++ end = __start + __length; \
++ start_unit = __start / ts; \
++ end_unit = (end + (ts - 1)) / ts; \
++ \
++ /* Trim v high bits */ \
++ if (__length < sizeof(__v) * CHAR_BIT) \
++ __v &= ~((~(typeof(__v)) 0) << __length); \
++ \
++ /* We can now append v with a simple "or", shift it piece-wise */ \
++ this_unit = start_unit; \
++ if (start_unit == end_unit - 1) { \
++ mask = ~((~(type) 0) << (__start % ts)); \
++ if (end % ts) \
++ mask |= (~(type) 0) << (end % ts); \
++ cmask = (type) __v << (__start % ts); \
++ cmask &= ~mask; \
++ __ptr[this_unit] &= mask; \
++ __ptr[this_unit] |= cmask; \
++ break; \
++ } \
++ if (__start % ts) { \
++ cshift = __start % ts; \
++ mask = ~((~(type) 0) << cshift); \
++ cmask = (type) __v << cshift; \
++ cmask &= ~mask; \
++ __ptr[this_unit] &= mask; \
++ __ptr[this_unit] |= cmask; \
++ __v = _bt_piecewise_rshift(__v, ts - cshift); \
++ __start += ts - cshift; \
++ this_unit++; \
++ } \
++ for (; this_unit < end_unit - 1; this_unit++) { \
++ __ptr[this_unit] = (type) __v; \
++ __v = _bt_piecewise_rshift(__v, ts); \
++ __start += ts; \
++ } \
++ if (end % ts) { \
++ mask = (~(type) 0) << (end % ts); \
++ cmask = (type) __v; \
++ cmask &= ~mask; \
++ __ptr[this_unit] &= mask; \
++ __ptr[this_unit] |= cmask; \
++ } else \
++ __ptr[this_unit] = (type) __v; \
++} while (0)
++
++#define _bt_bitfield_write_be(_ptr, type, _start, _length, _v) \
++do { \
++ typeof(_v) __v = (_v); \
++ type *__ptr = (void *) (_ptr); \
++ unsigned long __start = (_start), __length = (_length); \
++ type mask, cmask; \
++ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
++ unsigned long start_unit, end_unit, this_unit; \
++ unsigned long end, cshift; /* cshift is "complement shift" */ \
++ \
++ if (!__length) \
++ break; \
++ \
++ end = __start + __length; \
++ start_unit = __start / ts; \
++ end_unit = (end + (ts - 1)) / ts; \
++ \
++ /* Trim v high bits */ \
++ if (__length < sizeof(__v) * CHAR_BIT) \
++ __v &= ~((~(typeof(__v)) 0) << __length); \
++ \
++ /* We can now append v with a simple "or", shift it piece-wise */ \
++ this_unit = end_unit - 1; \
++ if (start_unit == end_unit - 1) { \
++ mask = ~((~(type) 0) << ((ts - (end % ts)) % ts)); \
++ if (__start % ts) \
++ mask |= (~((type) 0)) << (ts - (__start % ts)); \
++ cmask = (type) __v << ((ts - (end % ts)) % ts); \
++ cmask &= ~mask; \
++ __ptr[this_unit] &= mask; \
++ __ptr[this_unit] |= cmask; \
++ break; \
++ } \
++ if (end % ts) { \
++ cshift = end % ts; \
++ mask = ~((~(type) 0) << (ts - cshift)); \
++ cmask = (type) __v << (ts - cshift); \
++ cmask &= ~mask; \
++ __ptr[this_unit] &= mask; \
++ __ptr[this_unit] |= cmask; \
++ __v = _bt_piecewise_rshift(__v, cshift); \
++ end -= cshift; \
++ this_unit--; \
++ } \
++ for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
++ __ptr[this_unit] = (type) __v; \
++ __v = _bt_piecewise_rshift(__v, ts); \
++ end -= ts; \
++ } \
++ if (__start % ts) { \
++ mask = (~(type) 0) << (ts - (__start % ts)); \
++ cmask = (type) __v; \
++ cmask &= ~mask; \
++ __ptr[this_unit] &= mask; \
++ __ptr[this_unit] |= cmask; \
++ } else \
++ __ptr[this_unit] = (type) __v; \
++} while (0)
++
++/*
++ * bt_bitfield_write - write integer to a bitfield in native endianness
++ * bt_bitfield_write_le - write integer to a bitfield in little endian
++ * bt_bitfield_write_be - write integer to a bitfield in big endian
++ */
++
++#if (__BYTE_ORDER == __LITTLE_ENDIAN)
++
++#define bt_bitfield_write(ptr, type, _start, _length, _v) \
++ _bt_bitfield_write_le(ptr, type, _start, _length, _v)
++
++#define bt_bitfield_write_le(ptr, type, _start, _length, _v) \
++ _bt_bitfield_write_le(ptr, type, _start, _length, _v)
++
++#define bt_bitfield_write_be(ptr, type, _start, _length, _v) \
++ _bt_bitfield_write_be(ptr, unsigned char, _start, _length, _v)
++
++#elif (__BYTE_ORDER == __BIG_ENDIAN)
++
++#define bt_bitfield_write(ptr, type, _start, _length, _v) \
++ _bt_bitfield_write_be(ptr, type, _start, _length, _v)
++
++#define bt_bitfield_write_le(ptr, type, _start, _length, _v) \
++ _bt_bitfield_write_le(ptr, unsigned char, _start, _length, _v)
++
++#define bt_bitfield_write_be(ptr, type, _start, _length, _v) \
++ _bt_bitfield_write_be(ptr, type, _start, _length, _v)
++
++#else /* (BYTE_ORDER == PDP_ENDIAN) */
++
++#error "Byte order not supported"
++
++#endif
++
++#define _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
++do { \
++ typeof(*(_vptr)) *__vptr = (_vptr); \
++ typeof(*__vptr) __v; \
++ type *__ptr = (void *) (_ptr); \
++ unsigned long __start = (_start), __length = (_length); \
++ type mask, cmask; \
++ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
++ unsigned long start_unit, end_unit, this_unit; \
++ unsigned long end, cshift; /* cshift is "complement shift" */ \
++ \
++ if (!__length) { \
++ *__vptr = 0; \
++ break; \
++ } \
++ \
++ end = __start + __length; \
++ start_unit = __start / ts; \
++ end_unit = (end + (ts - 1)) / ts; \
++ \
++ this_unit = end_unit - 1; \
++ if (_bt_is_signed_type(typeof(__v)) \
++ && (__ptr[this_unit] & ((type) 1 << ((end % ts ? : ts) - 1)))) \
++ __v = ~(typeof(__v)) 0; \
++ else \
++ __v = 0; \
++ if (start_unit == end_unit - 1) { \
++ cmask = __ptr[this_unit]; \
++ cmask >>= (__start % ts); \
++ if ((end - __start) % ts) { \
++ mask = ~((~(type) 0) << (end - __start)); \
++ cmask &= mask; \
++ } \
++ __v = _bt_piecewise_lshift(__v, end - __start); \
++ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
++ *__vptr = __v; \
++ break; \
++ } \
++ if (end % ts) { \
++ cshift = end % ts; \
++ mask = ~((~(type) 0) << cshift); \
++ cmask = __ptr[this_unit]; \
++ cmask &= mask; \
++ __v = _bt_piecewise_lshift(__v, cshift); \
++ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
++ end -= cshift; \
++ this_unit--; \
++ } \
++ for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
++ __v = _bt_piecewise_lshift(__v, ts); \
++ __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
++ end -= ts; \
++ } \
++ if (__start % ts) { \
++ mask = ~((~(type) 0) << (ts - (__start % ts))); \
++ cmask = __ptr[this_unit]; \
++ cmask >>= (__start % ts); \
++ cmask &= mask; \
++ __v = _bt_piecewise_lshift(__v, ts - (__start % ts)); \
++ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
++ } else { \
++ __v = _bt_piecewise_lshift(__v, ts); \
++ __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
++ } \
++ *__vptr = __v; \
++} while (0)
++
++#define _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
++do { \
++ typeof(*(_vptr)) *__vptr = (_vptr); \
++ typeof(*__vptr) __v; \
++ type *__ptr = (void *) (_ptr); \
++ unsigned long __start = (_start), __length = (_length); \
++ type mask, cmask; \
++ unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
++ unsigned long start_unit, end_unit, this_unit; \
++ unsigned long end, cshift; /* cshift is "complement shift" */ \
++ \
++ if (!__length) { \
++ *__vptr = 0; \
++ break; \
++ } \
++ \
++ end = __start + __length; \
++ start_unit = __start / ts; \
++ end_unit = (end + (ts - 1)) / ts; \
++ \
++ this_unit = start_unit; \
++ if (_bt_is_signed_type(typeof(__v)) \
++ && (__ptr[this_unit] & ((type) 1 << (ts - (__start % ts) - 1)))) \
++ __v = ~(typeof(__v)) 0; \
++ else \
++ __v = 0; \
++ if (start_unit == end_unit - 1) { \
++ cmask = __ptr[this_unit]; \
++ cmask >>= (ts - (end % ts)) % ts; \
++ if ((end - __start) % ts) { \
++ mask = ~((~(type) 0) << (end - __start)); \
++ cmask &= mask; \
++ } \
++ __v = _bt_piecewise_lshift(__v, end - __start); \
++ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
++ *__vptr = __v; \
++ break; \
++ } \
++ if (__start % ts) { \
++ cshift = __start % ts; \
++ mask = ~((~(type) 0) << (ts - cshift)); \
++ cmask = __ptr[this_unit]; \
++ cmask &= mask; \
++ __v = _bt_piecewise_lshift(__v, ts - cshift); \
++ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
++ __start += ts - cshift; \
++ this_unit++; \
++ } \
++ for (; this_unit < end_unit - 1; this_unit++) { \
++ __v = _bt_piecewise_lshift(__v, ts); \
++ __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
++ __start += ts; \
++ } \
++ if (end % ts) { \
++ mask = ~((~(type) 0) << (end % ts)); \
++ cmask = __ptr[this_unit]; \
++ cmask >>= ts - (end % ts); \
++ cmask &= mask; \
++ __v = _bt_piecewise_lshift(__v, end % ts); \
++ __v |= _bt_unsigned_cast(typeof(__v), cmask); \
++ } else { \
++ __v = _bt_piecewise_lshift(__v, ts); \
++ __v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
++ } \
++ *__vptr = __v; \
++} while (0)
++
++/*
++ * bt_bitfield_read - read integer from a bitfield in native endianness
++ * bt_bitfield_read_le - read integer from a bitfield in little endian
++ * bt_bitfield_read_be - read integer from a bitfield in big endian
++ */
++
++#if (__BYTE_ORDER == __LITTLE_ENDIAN)
++
++#define bt_bitfield_read(_ptr, type, _start, _length, _vptr) \
++ _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr)
++
++#define bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
++ _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr)
++
++#define bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
++ _bt_bitfield_read_be(_ptr, unsigned char, _start, _length, _vptr)
++
++#elif (__BYTE_ORDER == __BIG_ENDIAN)
++
++#define bt_bitfield_read(_ptr, type, _start, _length, _vptr) \
++ _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr)
++
++#define bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
++ _bt_bitfield_read_le(_ptr, unsigned char, _start, _length, _vptr)
++
++#define bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
++ _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr)
++
++#else /* (__BYTE_ORDER == __PDP_ENDIAN) */
++
++#error "Byte order not supported"
++
++#endif
++
++#endif /* _BABELTRACE_BITFIELD_H */
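A minimal usage sketch: packing a 5-bit value at bit offset 3 of a byte array, with unsigned char as the access unit, then reading it back.

unsigned char buf[2] = { 0 };
unsigned int v = 0x15, out;

bt_bitfield_write(buf, unsigned char, 3, 5, v);		/* writes bits 3..7 */
bt_bitfield_read(buf, unsigned char, 3, 5, &out);	/* out == 0x15 */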
+--- /dev/null
++++ b/drivers/staging/lttng/lib/bug.h
+@@ -0,0 +1,41 @@
++#ifndef _LTTNG_BUG_H
++#define _LTTNG_BUG_H
++
++/*
++ * lib/bug.h
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/**
++ * BUILD_RUNTIME_BUG_ON - check condition at build (if constant) or runtime
++ * @condition: the condition which should be false.
++ *
++ * If the condition is a constant and true, the compiler will generate a build
++ * error. If the condition is not constant, a BUG will be triggered at runtime
++ * if the condition is ever true. If the condition is constant and false, no
++ * code is emitted.
++ */
++#define BUILD_RUNTIME_BUG_ON(condition) \
++ do { \
++ if (__builtin_constant_p(condition)) \
++ BUILD_BUG_ON(condition); \
++ else \
++ BUG_ON(condition); \
++ } while (0)
++
++#endif
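A usage sketch: the power-of-two checks in lib/align.h above are the typical callers; the hypothetical helper below only illustrates the three possible outcomes of the macro.

static inline unsigned long align_mask(unsigned long alignment)
{
	/*
	 * Constant and false: no code emitted. Constant and true: build
	 * error. Non-constant: checked with BUG_ON() at runtime.
	 */
	BUILD_RUNTIME_BUG_ON(alignment & (alignment - 1));
	return alignment - 1;
}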
+--- /dev/null
++++ b/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.c
+@@ -0,0 +1,215 @@
++/*
++ * lttng_prio_heap.c
++ *
++ * Priority heap containing pointers. Based on CLRS, chapter 6.
++ *
++ * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to deal
++ * in the Software without restriction, including without limitation the rights
++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++ * copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++#include <linux/slab.h>
++#include "lttng_prio_heap.h"
++
++#ifdef DEBUG_HEAP
++void lttng_check_heap(const struct lttng_ptr_heap *heap)
++{
++ size_t i;
++
++ if (!heap->len)
++ return;
++
++ for (i = 1; i < heap->len; i++)
++ WARN_ON_ONCE(!heap->gt(heap->ptrs[i], heap->ptrs[0]));
++}
++#endif
++
++static
++size_t parent(size_t i)
++{
++	return (i - 1) >> 1;
++}
++
++static
++size_t left(size_t i)
++{
++ return (i << 1) + 1;
++}
++
++static
++size_t right(size_t i)
++{
++ return (i << 1) + 2;
++}
++
++/*
++ * Copy of heap->ptrs pointer is invalid after heap_grow.
++ */
++static
++int heap_grow(struct lttng_ptr_heap *heap, size_t new_len)
++{
++ void **new_ptrs;
++
++ if (heap->alloc_len >= new_len)
++ return 0;
++
++ heap->alloc_len = max_t(size_t, new_len, heap->alloc_len << 1);
++ new_ptrs = kmalloc(heap->alloc_len * sizeof(void *), heap->gfpmask);
++ if (!new_ptrs)
++ return -ENOMEM;
++ if (heap->ptrs)
++ memcpy(new_ptrs, heap->ptrs, heap->len * sizeof(void *));
++ kfree(heap->ptrs);
++ heap->ptrs = new_ptrs;
++ return 0;
++}
++
++static
++int heap_set_len(struct lttng_ptr_heap *heap, size_t new_len)
++{
++ int ret;
++
++ ret = heap_grow(heap, new_len);
++ if (ret)
++ return ret;
++ heap->len = new_len;
++ return 0;
++}
++
++int lttng_heap_init(struct lttng_ptr_heap *heap, size_t alloc_len,
++ gfp_t gfpmask, int gt(void *a, void *b))
++{
++ heap->ptrs = NULL;
++ heap->len = 0;
++ heap->alloc_len = 0;
++ heap->gt = gt;
++ heap->gfpmask = gfpmask;
++ /*
++ * Minimum size allocated is 1 entry to ensure memory allocation
++ * never fails within heap_replace_max.
++ */
++ return heap_grow(heap, max_t(size_t, 1, alloc_len));
++}
++
++void lttng_heap_free(struct lttng_ptr_heap *heap)
++{
++ kfree(heap->ptrs);
++}
++
++static void heapify(struct lttng_ptr_heap *heap, size_t i)
++{
++ void **ptrs = heap->ptrs;
++ size_t l, r, largest;
++
++ for (;;) {
++ void *tmp;
++
++ l = left(i);
++ r = right(i);
++ if (l < heap->len && heap->gt(ptrs[l], ptrs[i]))
++ largest = l;
++ else
++ largest = i;
++ if (r < heap->len && heap->gt(ptrs[r], ptrs[largest]))
++ largest = r;
++ if (largest == i)
++ break;
++ tmp = ptrs[i];
++ ptrs[i] = ptrs[largest];
++ ptrs[largest] = tmp;
++ i = largest;
++ }
++ lttng_check_heap(heap);
++}
++
++void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p)
++{
++ void *res;
++
++ if (!heap->len) {
++ (void) heap_set_len(heap, 1);
++ heap->ptrs[0] = p;
++ lttng_check_heap(heap);
++ return NULL;
++ }
++
++ /* Replace the current max and heapify */
++ res = heap->ptrs[0];
++ heap->ptrs[0] = p;
++ heapify(heap, 0);
++ return res;
++}
++
++int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p)
++{
++ void **ptrs;
++ size_t pos;
++ int ret;
++
++ ret = heap_set_len(heap, heap->len + 1);
++ if (ret)
++ return ret;
++ ptrs = heap->ptrs;
++ pos = heap->len - 1;
++ while (pos > 0 && heap->gt(p, ptrs[parent(pos)])) {
++ /* Move parent down until we find the right spot */
++ ptrs[pos] = ptrs[parent(pos)];
++ pos = parent(pos);
++ }
++ ptrs[pos] = p;
++ lttng_check_heap(heap);
++ return 0;
++}
++
++void *lttng_heap_remove(struct lttng_ptr_heap *heap)
++{
++ switch (heap->len) {
++ case 0:
++ return NULL;
++ case 1:
++ (void) heap_set_len(heap, 0);
++ return heap->ptrs[0];
++ }
++ /* Shrink, replace the current max by previous last entry and heapify */
++ heap_set_len(heap, heap->len - 1);
++ /* len changed. previous last entry is at heap->len */
++ return lttng_heap_replace_max(heap, heap->ptrs[heap->len]);
++}
++
++void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p)
++{
++ size_t pos, len = heap->len;
++
++ for (pos = 0; pos < len; pos++)
++ if (heap->ptrs[pos] == p)
++ goto found;
++ return NULL;
++found:
++ if (heap->len == 1) {
++ (void) heap_set_len(heap, 0);
++ lttng_check_heap(heap);
++ return heap->ptrs[0];
++ }
++ /* Replace p with previous last entry and heapify. */
++ heap_set_len(heap, heap->len - 1);
++ /* len changed. previous last entry is at heap->len */
++ heap->ptrs[pos] = heap->ptrs[heap->len];
++ heapify(heap, pos);
++ return p;
++}
+--- /dev/null
++++ b/drivers/staging/lttng/lib/prio_heap/lttng_prio_heap.h
+@@ -0,0 +1,125 @@
++#ifndef _LTTNG_PRIO_HEAP_H
++#define _LTTNG_PRIO_HEAP_H
++
++/*
++ * lttng_prio_heap.h
++ *
++ * Priority heap containing pointers. Based on CLRS, chapter 6.
++ *
++ * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to deal
++ * in the Software without restriction, including without limitation the rights
++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++ * copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++#include <linux/gfp.h>
++
++struct lttng_ptr_heap {
++ size_t len, alloc_len;
++ void **ptrs;
++ int (*gt)(void *a, void *b);
++ gfp_t gfpmask;
++};
++
++#ifdef DEBUG_HEAP
++void lttng_check_heap(const struct lttng_ptr_heap *heap);
++#else
++static inline
++void lttng_check_heap(const struct lttng_ptr_heap *heap)
++{
++}
++#endif
++
++/**
++ * lttng_heap_maximum - return the largest element in the heap
++ * @heap: the heap to be operated on
++ *
++ * Returns the largest element in the heap, without performing any modification
++ * to the heap structure. Returns NULL if the heap is empty.
++ */
++static inline void *lttng_heap_maximum(const struct lttng_ptr_heap *heap)
++{
++ lttng_check_heap(heap);
++ return heap->len ? heap->ptrs[0] : NULL;
++}
++
++/**
++ * lttng_heap_init - initialize the heap
++ * @heap: the heap to initialize
++ * @alloc_len: number of elements initially allocated
++ * @gfp: allocation flags
++ * @gt: function to compare the elements
++ *
++ * Returns -ENOMEM if out of memory.
++ */
++extern int lttng_heap_init(struct lttng_ptr_heap *heap,
++ size_t alloc_len, gfp_t gfpmask,
++ int gt(void *a, void *b));
++
++/**
++ * lttng_heap_free - free the heap
++ * @heap: the heap to free
++ */
++extern void lttng_heap_free(struct lttng_ptr_heap *heap);
++
++/**
++ * lttng_heap_insert - insert an element into the heap
++ * @heap: the heap to be operated on
++ * @p: the element to add
++ *
++ * Insert an element into the heap.
++ *
++ * Returns -ENOMEM if out of memory.
++ */
++extern int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p);
++
++/**
++ * lttng_heap_remove - remove the largest element from the heap
++ * @heap: the heap to be operated on
++ *
++ * Returns the largest element in the heap. It removes this element from the
++ * heap. Returns NULL if the heap is empty.
++ */
++extern void *lttng_heap_remove(struct lttng_ptr_heap *heap);
++
++/**
++ * lttng_heap_cherrypick - remove a given element from the heap
++ * @heap: the heap to be operated on
++ * @p: the element
++ *
++ * Remove the given element from the heap. Return the element if present, else
++ * return NULL. This algorithm has a complexity of O(n), which is higher than
++ * the O(log(n)) provided by the rest of this API.
++ */
++extern void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p);
++
++/**
++ * lttng_heap_replace_max - replace the largest element of the heap
++ * @heap: the heap to be operated on
++ * @p: the pointer to be inserted as topmost element replacement
++ *
++ * Returns the largest element in the heap. It removes this element from the
++ * heap. The heap is rebalanced only once after the insertion. Returns NULL if
++ * the heap is empty.
++ *
++ * This is the equivalent of calling heap_remove() and then heap_insert(), but
++ * it only rebalances the heap once. It never allocates memory.
++ */
++extern void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p);
++
++#endif /* _LTTNG_PRIO_HEAP_H */
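A usage sketch of the API, assuming a hypothetical struct stream element type holding a timestamp; the demo function and its names are illustrative only.

struct stream {
	u64 timestamp;
};

static int stream_gt(void *a, void *b)
{
	return ((struct stream *) a)->timestamp > ((struct stream *) b)->timestamp;
}

static int stream_heap_demo(struct stream *s1, struct stream *s2)
{
	struct lttng_ptr_heap heap;
	struct stream *max;
	int ret;

	ret = lttng_heap_init(&heap, 2, GFP_KERNEL, stream_gt);
	if (ret)
		return ret;			/* -ENOMEM */
	ret = lttng_heap_insert(&heap, s1);
	if (!ret)
		ret = lttng_heap_insert(&heap, s2);
	max = lttng_heap_maximum(&heap);	/* peek: largest timestamp, heap unchanged */
	max = lttng_heap_remove(&heap);		/* pop: largest timestamp */
	(void) max;
	lttng_heap_free(&heap);
	return ret;
}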
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/api.h
+@@ -0,0 +1,37 @@
++#ifndef _LIB_RING_BUFFER_API_H
++#define _LIB_RING_BUFFER_API_H
++
++/*
++ * lib/ringbuffer/api.h
++ *
++ * Ring Buffer API.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include "../../wrapper/ringbuffer/backend.h"
++#include "../../wrapper/ringbuffer/frontend.h"
++#include "../../wrapper/ringbuffer/vfs.h"
++
++/*
++ * ring_buffer_frontend_api.h contains static inline functions that depend on
++ * client static inlines. Hence the inclusion of this "api" header only
++ * within the client.
++ */
++#include "../../wrapper/ringbuffer/frontend_api.h"
++
++#endif /* _LIB_RING_BUFFER_API_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/backend.h
+@@ -0,0 +1,272 @@
++#ifndef _LIB_RING_BUFFER_BACKEND_H
++#define _LIB_RING_BUFFER_BACKEND_H
++
++/*
++ * lib/ringbuffer/backend.h
++ *
++ * Ring buffer backend (API).
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
++ * the reader in flight recorder mode.
++ */
++
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/wait.h>
++#include <linux/poll.h>
++#include <linux/list.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/uaccess.h>
++
++/* Internal helpers */
++#include "../../wrapper/ringbuffer/backend_internal.h"
++#include "../../wrapper/ringbuffer/frontend_internal.h"
++
++/* Ring buffer backend API */
++
++/* Ring buffer backend access (read/write) */
++
++extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
++ size_t offset, void *dest, size_t len);
++
++extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
++ size_t offset, void __user *dest,
++ size_t len);
++
++extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
++ size_t offset, void *dest, size_t len);
++
++extern struct page **
++lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb, size_t offset,
++ void ***virt);
++
++/*
++ * Return the address where a given offset is located.
++ * Should be used to get the current subbuffer header pointer. Given we know
++ * it's never on a page boundary, it's safe to write directly to this address,
++ * as long as the write is never bigger than a page size.
++ */
++extern void *
++lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
++ size_t offset);
++extern void *
++lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
++ size_t offset);
++
++/**
++ * lib_ring_buffer_write - write data to a buffer backend
++ * @config : ring buffer instance configuration
++ * @ctx: ring buffer context. (input arguments only)
++ * @src : source pointer to copy from
++ * @len : length of data to copy
++ *
++ * This function copies "len" bytes of data from a source pointer to a buffer
++ * backend, at the current context offset. This is more or less a buffer
++ * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
++ * if the copy crosses a page boundary.
++ */
++static inline
++void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_ctx *ctx,
++ const void *src, size_t len)
++{
++ struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
++ struct channel_backend *chanb = &ctx->chan->backend;
++ size_t sbidx, index;
++ size_t offset = ctx->buf_offset;
++ ssize_t pagecpy;
++ struct lib_ring_buffer_backend_pages *rpages;
++ unsigned long sb_bindex, id;
++
++ if (unlikely(!len))
++ return;
++ offset &= chanb->buf_size - 1;
++ sbidx = offset >> chanb->subbuf_size_order;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
++ id = bufb->buf_wsb[sbidx].id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(ctx->chan,
++ config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ if (likely(pagecpy == len))
++ lib_ring_buffer_do_copy(config,
++ rpages->p[index].virt
++ + (offset & ~PAGE_MASK),
++ src, len);
++ else
++ _lib_ring_buffer_write(bufb, offset, src, len, 0);
++ ctx->buf_offset += len;
++}
++
++/**
++ * lib_ring_buffer_memset - write len bytes of c to a buffer backend
++ * @config : ring buffer instance configuration
++ * @bufb : ring buffer backend
++ * @offset : offset within the buffer
++ * @c : the byte to copy
++ * @len : number of bytes to copy
++ *
++ * This function writes "len" bytes of "c" to a buffer backend, at a specific
++ * offset. This is more or less a buffer backend-specific memset() operation.
++ * Calls the slow path (_ring_buffer_memset) if write is crossing a page
++ * boundary.
++ */
++static inline
++void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_ctx *ctx, int c, size_t len)
++{
++
++ struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
++ struct channel_backend *chanb = &ctx->chan->backend;
++ size_t sbidx, index;
++ size_t offset = ctx->buf_offset;
++ ssize_t pagecpy;
++ struct lib_ring_buffer_backend_pages *rpages;
++ unsigned long sb_bindex, id;
++
++ if (unlikely(!len))
++ return;
++ offset &= chanb->buf_size - 1;
++ sbidx = offset >> chanb->subbuf_size_order;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
++ id = bufb->buf_wsb[sbidx].id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(ctx->chan,
++ config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ if (likely(pagecpy == len))
++ lib_ring_buffer_do_memset(rpages->p[index].virt
++ + (offset & ~PAGE_MASK),
++ c, len);
++ else
++ _lib_ring_buffer_memset(bufb, offset, c, len, 0);
++ ctx->buf_offset += len;
++}
++
++/**
++ * lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
++ * @config : ring buffer instance configuration
++ * @ctx: ring buffer context. (input arguments only)
++ * @src : userspace source pointer to copy from
++ * @len : length of data to copy
++ *
++ * This function copies "len" bytes of data from a userspace pointer to a
++ * buffer backend, at the current context offset. This is more or less a buffer
++ * backend-specific memcpy() operation. Calls the slow path
++ * (_ring_buffer_write_from_user_inatomic) if the copy crosses a page boundary.
++ * Disable the page fault handler to ensure we never try to take the mmap_sem.
++ */
++static inline
++void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_ctx *ctx,
++ const void __user *src, size_t len)
++{
++ struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
++ struct channel_backend *chanb = &ctx->chan->backend;
++ size_t sbidx, index;
++ size_t offset = ctx->buf_offset;
++ ssize_t pagecpy;
++ struct lib_ring_buffer_backend_pages *rpages;
++ unsigned long sb_bindex, id;
++ unsigned long ret;
++ mm_segment_t old_fs = get_fs();
++
++ if (unlikely(!len))
++ return;
++ offset &= chanb->buf_size - 1;
++ sbidx = offset >> chanb->subbuf_size_order;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
++ id = bufb->buf_wsb[sbidx].id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(ctx->chan,
++ config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++
++ set_fs(KERNEL_DS);
++ pagefault_disable();
++ if (unlikely(!access_ok(VERIFY_READ, src, len)))
++ goto fill_buffer;
++
++ if (likely(pagecpy == len)) {
++ ret = lib_ring_buffer_do_copy_from_user_inatomic(
++ rpages->p[index].virt + (offset & ~PAGE_MASK),
++ src, len);
++ if (unlikely(ret > 0)) {
++ len -= (pagecpy - ret);
++ offset += (pagecpy - ret);
++ goto fill_buffer;
++ }
++ } else {
++ _lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
++ }
++ pagefault_enable();
++ set_fs(old_fs);
++ ctx->buf_offset += len;
++
++ return;
++
++fill_buffer:
++ pagefault_enable();
++ set_fs(old_fs);
++ /*
++ * In the error path we call the slow path version to avoid
++ * the pollution of static inline code.
++ */
++ _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
++}
++
++/*
++ * This accessor counts the number of unread records in a buffer.
++ * It only provides a consistent value if no reads nor writes are performed
++ * concurrently.
++ */
++static inline
++unsigned long lib_ring_buffer_get_records_unread(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ struct lib_ring_buffer_backend *bufb = &buf->backend;
++ struct lib_ring_buffer_backend_pages *pages;
++ unsigned long records_unread = 0, sb_bindex, id;
++ unsigned int i;
++
++ for (i = 0; i < bufb->chan->backend.num_subbuf; i++) {
++ id = bufb->buf_wsb[i].id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ pages = bufb->array[sb_bindex];
++ records_unread += v_read(config, &pages->records_unread);
++ }
++ if (config->mode == RING_BUFFER_OVERWRITE) {
++ id = bufb->buf_rsb.id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ pages = bufb->array[sb_bindex];
++ records_unread += v_read(config, &pages->records_unread);
++ }
++ return records_unread;
++}
++
++#endif /* _LIB_RING_BUFFER_BACKEND_H */
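The three write helpers above share the same fast/slow path split; the sketch below restates it with offset, len, config, bufb, src and dest assumed in scope as in those functions. With PAGE_SIZE == 4096, a 16-byte record at offset 4090 gives pagecpy == 6, so it crosses a page and the slow path is taken.

pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);		/* bytes left in the current page */
if (likely(pagecpy == len))
	lib_ring_buffer_do_copy(config, dest, src, len);	/* record fits in one backend page */
else
	_lib_ring_buffer_write(bufb, offset, src, len, 0);	/* slow path handles the crossing */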
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/backend_internal.h
+@@ -0,0 +1,461 @@
++#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
++#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H
++
++/*
++ * lib/ringbuffer/backend_internal.h
++ *
++ * Ring buffer backend (internal helpers).
++ *
++ * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include "../../wrapper/ringbuffer/config.h"
++#include "../../wrapper/ringbuffer/backend_types.h"
++#include "../../wrapper/ringbuffer/frontend_types.h"
++#include <linux/string.h>
++#include <linux/uaccess.h>
++
++/* Ring buffer backend API presented to the frontend */
++
++/* Ring buffer and channel backend create/free */
++
++int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
++ struct channel_backend *chan, int cpu);
++void channel_backend_unregister_notifiers(struct channel_backend *chanb);
++void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
++int channel_backend_init(struct channel_backend *chanb,
++ const char *name,
++ const struct lib_ring_buffer_config *config,
++ void *priv, size_t subbuf_size,
++ size_t num_subbuf);
++void channel_backend_free(struct channel_backend *chanb);
++
++void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
++void channel_backend_reset(struct channel_backend *chanb);
++
++int lib_ring_buffer_backend_init(void);
++void lib_ring_buffer_backend_exit(void);
++
++extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
++ size_t offset, const void *src, size_t len,
++ ssize_t pagecpy);
++extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
++ size_t offset, int c, size_t len,
++ ssize_t pagecpy);
++extern void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
++ size_t offset, const void *src,
++ size_t len, ssize_t pagecpy);
++
++/*
++ * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
++ * exchanged atomically.
++ *
++ * Top half word, except lowest bit, belongs to "offset", which is used to keep
++ * count of the produced buffers. For overwrite mode, this provides the
++ * consumer with the capacity to read subbuffers in order, handling the
++ * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
++ * systems) concurrently with a single execution of get_subbuf (between offset
++ * sampling and subbuffer ID exchange).
++ */
++
++#define HALF_ULONG_BITS (BITS_PER_LONG >> 1)
++
++#define SB_ID_OFFSET_SHIFT (HALF_ULONG_BITS + 1)
++#define SB_ID_OFFSET_COUNT (1UL << SB_ID_OFFSET_SHIFT)
++#define SB_ID_OFFSET_MASK (~(SB_ID_OFFSET_COUNT - 1))
++/*
++ * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
++ */
++#define SB_ID_NOREF_SHIFT (SB_ID_OFFSET_SHIFT - 1)
++#define SB_ID_NOREF_COUNT (1UL << SB_ID_NOREF_SHIFT)
++#define SB_ID_NOREF_MASK SB_ID_NOREF_COUNT
++/*
++ * In overwrite mode: lowest half of word is used for index.
++ * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
++ * In producer-consumer mode: whole word used for index.
++ */
++#define SB_ID_INDEX_SHIFT 0
++#define SB_ID_INDEX_COUNT (1UL << SB_ID_INDEX_SHIFT)
++#define SB_ID_INDEX_MASK (SB_ID_NOREF_COUNT - 1)
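++
++/*
++ * For illustration, on a 32-bit system (BITS_PER_LONG == 32) the macros above
++ * give the following overwrite-mode ID layout:
++ *
++ *   bits 31..17  offset (produced buffer count, 15 bits)
++ *   bit  16      noref flag
++ *   bits 15..0   subbuffer index (16 bits)
++ */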
++
++/*
++ * Construct the subbuffer id from offset, index and noref. Use only the index
++ * for producer-consumer mode (offset and noref are only used in overwrite
++ * mode).
++ */
++static inline
++unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
++ unsigned long offset, unsigned long noref,
++ unsigned long index)
++{
++ if (config->mode == RING_BUFFER_OVERWRITE)
++ return (offset << SB_ID_OFFSET_SHIFT)
++ | (noref << SB_ID_NOREF_SHIFT)
++ | index;
++ else
++ return index;
++}
++
++/*
++ * Compare offset with the offset contained within id. Return 1 if the offset
++ * bits are identical, else 0.
++ */
++static inline
++int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
++ unsigned long id, unsigned long offset)
++{
++ return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
++}
++
++static inline
++unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
++ unsigned long id)
++{
++ if (config->mode == RING_BUFFER_OVERWRITE)
++ return id & SB_ID_INDEX_MASK;
++ else
++ return id;
++}
++
++static inline
++unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
++ unsigned long id)
++{
++ if (config->mode == RING_BUFFER_OVERWRITE)
++ return !!(id & SB_ID_NOREF_MASK);
++ else
++ return 1;
++}
++
++/*
++ * Only used by reader on subbuffer ID it has exclusive access to. No volatile
++ * needed.
++ */
++static inline
++void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
++ unsigned long *id)
++{
++ if (config->mode == RING_BUFFER_OVERWRITE)
++ *id |= SB_ID_NOREF_MASK;
++}
++
++static inline
++void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
++ unsigned long *id, unsigned long offset)
++{
++ unsigned long tmp;
++
++ if (config->mode == RING_BUFFER_OVERWRITE) {
++ tmp = *id;
++ tmp &= ~SB_ID_OFFSET_MASK;
++ tmp |= offset << SB_ID_OFFSET_SHIFT;
++ tmp |= SB_ID_NOREF_MASK;
++ /* Volatile store, read concurrently by readers. */
++ ACCESS_ONCE(*id) = tmp;
++ }
++}
++
++/* No volatile access, since already used locally */
++static inline
++void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
++ unsigned long *id)
++{
++ if (config->mode == RING_BUFFER_OVERWRITE)
++ *id &= ~SB_ID_NOREF_MASK;
++}
++
++/*
++ * For overwrite mode, cap the number of subbuffers per buffer to:
++ * 2^16 on 32-bit architectures
++ * 2^32 on 64-bit architectures
++ * This is required to fit in the index part of the ID. Return 0 on success,
++ * -EPERM on failure.
++ */
++static inline
++int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
++ unsigned long num_subbuf)
++{
++ if (config->mode == RING_BUFFER_OVERWRITE)
++ return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
++ else
++ return 0;
++}
++
++static inline
++void subbuffer_count_record(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb,
++ unsigned long idx)
++{
++ unsigned long sb_bindex;
++
++ sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
++ v_inc(config, &bufb->array[sb_bindex]->records_commit);
++}
++
++/*
++ * Reader has exclusive subbuffer access for record consumption. No need to
++ * perform the decrement atomically.
++ */
++static inline
++void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb)
++{
++ unsigned long sb_bindex;
++
++ sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
++ CHAN_WARN_ON(bufb->chan,
++ !v_read(config, &bufb->array[sb_bindex]->records_unread));
++ /* Non-atomic decrement protected by exclusive subbuffer access */
++ _v_dec(config, &bufb->array[sb_bindex]->records_unread);
++ v_inc(config, &bufb->records_read);
++}
++
++static inline
++unsigned long subbuffer_get_records_count(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb,
++ unsigned long idx)
++{
++ unsigned long sb_bindex;
++
++ sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
++ return v_read(config, &bufb->array[sb_bindex]->records_commit);
++}
++
++/*
++ * Must be executed at subbuffer delivery when the writer has _exclusive_
++ * subbuffer access. See ring_buffer_check_deliver() for details.
++ * ring_buffer_get_records_count() must be called to get the records count
++ * before this function, because this function resets the records_commit count.
++ */
++static inline
++unsigned long subbuffer_count_records_overrun(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb,
++ unsigned long idx)
++{
++ struct lib_ring_buffer_backend_pages *pages;
++ unsigned long overruns, sb_bindex;
++
++ sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
++ pages = bufb->array[sb_bindex];
++ overruns = v_read(config, &pages->records_unread);
++ v_set(config, &pages->records_unread,
++ v_read(config, &pages->records_commit));
++ v_set(config, &pages->records_commit, 0);
++
++ return overruns;
++}
++
++static inline
++void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb,
++ unsigned long idx,
++ unsigned long data_size)
++{
++ struct lib_ring_buffer_backend_pages *pages;
++ unsigned long sb_bindex;
++
++ sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
++ pages = bufb->array[sb_bindex];
++ pages->data_size = data_size;
++}
++
++static inline
++unsigned long subbuffer_get_read_data_size(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb)
++{
++ struct lib_ring_buffer_backend_pages *pages;
++ unsigned long sb_bindex;
++
++ sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
++ pages = bufb->array[sb_bindex];
++ return pages->data_size;
++}
++
++static inline
++unsigned long subbuffer_get_data_size(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb,
++ unsigned long idx)
++{
++ struct lib_ring_buffer_backend_pages *pages;
++ unsigned long sb_bindex;
++
++ sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
++ pages = bufb->array[sb_bindex];
++ return pages->data_size;
++}
++
++/**
++ * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
++ * writer.
++ */
++static inline
++void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb,
++ unsigned long idx)
++{
++ unsigned long id, new_id;
++
++ if (config->mode != RING_BUFFER_OVERWRITE)
++ return;
++
++ /*
++ * Perform a volatile access to read the sb_pages, because we want to
++ * read a coherent version of the pointer and the associated noref flag.
++ */
++ id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
++ for (;;) {
++ /* This check is called on the fast path for each record. */
++ if (likely(!subbuffer_id_is_noref(config, id))) {
++ * The store-after-load dependency ordering, which orders
++ * the writes to the subbuffer after the load and test of
++ * the noref flag, matches the memory barrier implied by
++ * the cmpxchg() in update_read_sb_index().
++ * in update_read_sb_index().
++ */
++ return; /* Already writing to this buffer */
++ }
++ new_id = id;
++ subbuffer_id_clear_noref(config, &new_id);
++ new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
++ if (likely(new_id == id))
++ break;
++ id = new_id;
++ }
++}
++
++/**
++ * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
++ * called by writer.
++ */
++static inline
++void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb,
++ unsigned long idx, unsigned long offset)
++{
++ if (config->mode != RING_BUFFER_OVERWRITE)
++ return;
++
++ /*
++ * Because ring_buffer_set_noref() is only called by a single thread
++ * (the one which updated the cc_sb value), there are no concurrent
++ * updates to take care of: other writers have not updated cc_sb, so
++ * they cannot set the noref flag, and concurrent readers cannot modify
++ * the pointer because the noref flag is not set yet.
++ * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
++ * to the subbuffer before this set noref operation.
++ * subbuffer_id_set_noref_offset() uses a volatile store to deal with concurrent
++ * readers of the noref flag.
++ */
++ CHAN_WARN_ON(bufb->chan,
++ subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
++ /*
++ * Memory barrier that ensures counter stores are ordered before set
++ * noref and offset.
++ */
++ smp_mb();
++ subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
++}
++
++/**
++ * update_read_sb_index - Read-side subbuffer index update.
++ */
++static inline
++int update_read_sb_index(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb,
++ struct channel_backend *chanb,
++ unsigned long consumed_idx,
++ unsigned long consumed_count)
++{
++ unsigned long old_id, new_id;
++
++ if (config->mode == RING_BUFFER_OVERWRITE) {
++ /*
++ * Exchange the target writer subbuffer with our own unused
++ * subbuffer. No need to use ACCESS_ONCE() here to read the
++ * old_id, because the value read will be confirmed by the
++ * following cmpxchg().
++ */
++ old_id = bufb->buf_wsb[consumed_idx].id;
++ if (unlikely(!subbuffer_id_is_noref(config, old_id)))
++ return -EAGAIN;
++ /*
++ * Make sure the offset count we are expecting matches the one
++ * indicated by the writer.
++ */
++ if (unlikely(!subbuffer_id_compare_offset(config, old_id,
++ consumed_count)))
++ return -EAGAIN;
++ CHAN_WARN_ON(bufb->chan,
++ !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
++ subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
++ consumed_count);
++ new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
++ bufb->buf_rsb.id);
++ if (unlikely(old_id != new_id))
++ return -EAGAIN;
++ bufb->buf_rsb.id = new_id;
++ } else {
++ /* No page exchange, use the writer page directly */
++ bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
++ }
++ return 0;
++}
++
++/*
++ * Use the architecture-specific memcpy implementation for constant-sized
++ * inputs, but rely on an inline memcpy when the length is not statically known.
++ * The function call to memcpy is just way too expensive for a fast path.
++ */
++#define lib_ring_buffer_do_copy(config, dest, src, len) \
++do { \
++ size_t __len = (len); \
++ if (__builtin_constant_p(len)) \
++ memcpy(dest, src, __len); \
++ else \
++ inline_memcpy(dest, src, __len); \
++} while (0)
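++
++/*
++ * For example, lib_ring_buffer_do_copy(config, dest, src, sizeof(u64)) takes
++ * the memcpy() path (which the compiler specializes for the constant size),
++ * whereas a runtime-computed length goes through inline_memcpy().
++ */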
++
++/*
++ * We use __copy_from_user_inatomic to copy userspace data since we already
++ * did the access_ok for the whole range.
++ */
++static inline
++unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
++ const void __user *src,
++ unsigned long len)
++{
++ return __copy_from_user_inatomic(dest, src, len);
++}
++
++/*
++ * Write "len" bytes of value "c" to "dest".
++ */
++static inline
++void lib_ring_buffer_do_memset(char *dest, int c,
++ unsigned long len)
++{
++ unsigned long i;
++
++ for (i = 0; i < len; i++)
++ dest[i] = c;
++}
++
++#endif /* _LIB_RING_BUFFER_BACKEND_INTERNAL_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/backend_types.h
+@@ -0,0 +1,97 @@
++#ifndef _LIB_RING_BUFFER_BACKEND_TYPES_H
++#define _LIB_RING_BUFFER_BACKEND_TYPES_H
++
++/*
++ * lib/ringbuffer/backend_types.h
++ *
++ * Ring buffer backend (types).
++ *
++ * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/cpumask.h>
++#include <linux/types.h>
++
++struct lib_ring_buffer_backend_page {
++ void *virt; /* page virtual address (cached) */
++ struct page *page; /* pointer to page structure */
++};
++
++struct lib_ring_buffer_backend_pages {
++ unsigned long mmap_offset; /* offset of the subbuffer in mmap */
++ union v_atomic records_commit; /* current records committed count */
++ union v_atomic records_unread; /* records to read */
++ unsigned long data_size; /* Amount of data to read from subbuf */
++ struct lib_ring_buffer_backend_page p[];
++};
++
++struct lib_ring_buffer_backend_subbuffer {
++ /* Identifier for subbuf backend pages. Exchanged atomically. */
++ unsigned long id; /* backend subbuffer identifier */
++};
++
++/*
++ * Forward declaration of frontend-specific channel and ring_buffer.
++ */
++struct channel;
++struct lib_ring_buffer;
++
++struct lib_ring_buffer_backend {
++ /* Array of ring_buffer_backend_subbuffer for writer */
++ struct lib_ring_buffer_backend_subbuffer *buf_wsb;
++ /* ring_buffer_backend_subbuffer for reader */
++ struct lib_ring_buffer_backend_subbuffer buf_rsb;
++ /*
++ * Pointer array of backend pages, for whole buffer.
++ * Indexed by ring_buffer_backend_subbuffer identifier (id) index.
++ */
++ struct lib_ring_buffer_backend_pages **array;
++ unsigned int num_pages_per_subbuf;
++
++ struct channel *chan; /* Associated channel */
++ int cpu; /* This buffer's cpu. -1 if global. */
++ union v_atomic records_read; /* Number of records read */
++ unsigned int allocated:1; /* is buffer allocated ? */
++};
++
++struct channel_backend {
++ unsigned long buf_size; /* Size of the buffer */
++ unsigned long subbuf_size; /* Sub-buffer size */
++ unsigned int subbuf_size_order; /* Order of sub-buffer size */
++ unsigned int num_subbuf_order; /*
++ * Order of number of sub-buffers/buffer
++ * for writer.
++ */
++ unsigned int buf_size_order; /* Order of buffer size */
++ unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */
++ struct lib_ring_buffer *buf; /* Channel per-cpu buffers */
++
++ unsigned long num_subbuf; /* Number of sub-buffers for writer */
++ u64 start_tsc; /* Channel creation TSC value */
++ void *priv; /* Client-specific information */
++ struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
++ /*
++ * We need to copy config because the module containing the
++ * source config can vanish before the last reference to this
++ * channel's streams is released.
++ */
++ struct lib_ring_buffer_config config; /* Ring buffer configuration */
++ cpumask_var_t cpumask; /* Allocated per-cpu buffers cpumask */
++ char name[NAME_MAX]; /* Channel name */
++};
++
++#endif /* _LIB_RING_BUFFER_BACKEND_TYPES_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/config.h
+@@ -0,0 +1,315 @@
++#ifndef _LIB_RING_BUFFER_CONFIG_H
++#define _LIB_RING_BUFFER_CONFIG_H
++
++/*
++ * lib/ringbuffer/config.h
++ *
++ * Ring buffer configuration header. Note: after declaring the standard inline
++ * functions, clients should also include linux/ringbuffer/api.h.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/types.h>
++#include <linux/percpu.h>
++#include "../align.h"
++#include "../../lttng-tracer-core.h"
++
++struct lib_ring_buffer;
++struct channel;
++struct lib_ring_buffer_config;
++struct lib_ring_buffer_ctx;
++
++/*
++ * Ring buffer client callbacks. Only used by slow path, never on fast path.
++ * For the fast path, record_header_size(), ring_buffer_clock_read() should be
++ * provided as inline functions too. These may simply return 0 if not used by
++ * the client.
++ */
++struct lib_ring_buffer_client_cb {
++ /* Mandatory callbacks */
++
++ /* A static inline version is also required for fast path */
++ u64 (*ring_buffer_clock_read) (struct channel *chan);
++ size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
++ struct channel *chan, size_t offset,
++ size_t *pre_header_padding,
++ struct lib_ring_buffer_ctx *ctx);
++
++ /* Slow path only, at subbuffer switch */
++ size_t (*subbuffer_header_size) (void);
++ void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
++ unsigned int subbuf_idx);
++ void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
++ unsigned int subbuf_idx, unsigned long data_size);
++
++ /* Optional callbacks (can be set to NULL) */
++
++ /* Called at buffer creation/finalize */
++ int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
++ int cpu, const char *name);
++ /*
++ * Clients should guarantee that no new reader handle can be opened
++ * after finalize.
++ */
++ void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);
++
++ /*
++ * Extract header length, payload length and timestamp from event
++ * record. Used by buffer iterators. Timestamp is only used by channel
++ * iterator.
++ */
++ void (*record_get) (const struct lib_ring_buffer_config *config,
++ struct channel *chan, struct lib_ring_buffer *buf,
++ size_t offset, size_t *header_len,
++ size_t *payload_len, u64 *timestamp);
++};
++
++/*
++ * Ring buffer instance configuration.
++ *
++ * Declare as "static const" within the client object to ensure the inline fast
++ * paths can be optimized.
++ *
++ * alloc/sync pairs:
++ *
++ * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
++ * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
++ * with preemption disabled (lib_ring_buffer_get_cpu() and
++ * lib_ring_buffer_put_cpu()).
++ *
++ * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
++ * Per-cpu buffer with global synchronization. Tracing can be performed with
++ * preemption enabled, statistically stays on the local buffers.
++ *
++ * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
++ * Should only be used for buffers belonging to a single thread or protected
++ * by mutual exclusion by the client. Note that periodical sub-buffer switch
++ * should be disabled in this kind of configuration.
++ *
++ * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
++ * Global shared buffer with global synchronization.
++ *
++ * wakeup:
++ *
++ * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
++ * buffers and wake up readers if data is ready. Mainly useful for tracers which
++ * don't want to call into the wakeup code on the tracing path. Use in
++ * combination with "read_timer_interval" channel_create() argument.
++ *
++ * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
++ * ready to read. Lower latencies before the reader is woken up. Mainly suitable
++ * for drivers.
++ *
++ * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
++ * has the responsibility to perform wakeups.
++ */
++struct lib_ring_buffer_config {
++ enum {
++ RING_BUFFER_ALLOC_PER_CPU,
++ RING_BUFFER_ALLOC_GLOBAL,
++ } alloc;
++ enum {
++ RING_BUFFER_SYNC_PER_CPU, /* Wait-free */
++ RING_BUFFER_SYNC_GLOBAL, /* Lock-free */
++ } sync;
++ enum {
++ RING_BUFFER_OVERWRITE, /* Overwrite when buffer full */
++ RING_BUFFER_DISCARD, /* Discard when buffer full */
++ } mode;
++ enum {
++ RING_BUFFER_SPLICE,
++ RING_BUFFER_MMAP,
++ RING_BUFFER_READ, /* TODO */
++ RING_BUFFER_ITERATOR,
++ RING_BUFFER_NONE,
++ } output;
++ enum {
++ RING_BUFFER_PAGE,
++ RING_BUFFER_VMAP, /* TODO */
++ RING_BUFFER_STATIC, /* TODO */
++ } backend;
++ enum {
++ RING_BUFFER_NO_OOPS_CONSISTENCY,
++ RING_BUFFER_OOPS_CONSISTENCY,
++ } oops;
++ enum {
++ RING_BUFFER_IPI_BARRIER,
++ RING_BUFFER_NO_IPI_BARRIER,
++ } ipi;
++ enum {
++ RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
++ RING_BUFFER_WAKEUP_BY_WRITER, /*
++ * writer wakes up reader,
++ * not lock-free
++ * (takes spinlock).
++ */
++ } wakeup;
++ /*
++ * tsc_bits: timestamp bits saved at each record.
++ * 0 and 64 disable the timestamp compression scheme.
++ */
++ unsigned int tsc_bits;
++ struct lib_ring_buffer_client_cb cb;
++};
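++
++/*
++ * Illustrative client configuration (sketch only; an actual client supplies
++ * its own callbacks and values): per-cpu buffers with per-cpu
++ * synchronization, overwrite mode, page backend, mmap output:
++ *
++ *   static const struct lib_ring_buffer_config client_config = {
++ *           .cb = { ... client callbacks ... },
++ *           .tsc_bits = 27,
++ *           .alloc = RING_BUFFER_ALLOC_PER_CPU,
++ *           .sync = RING_BUFFER_SYNC_PER_CPU,
++ *           .mode = RING_BUFFER_OVERWRITE,
++ *           .backend = RING_BUFFER_PAGE,
++ *           .output = RING_BUFFER_MMAP,
++ *           .oops = RING_BUFFER_OOPS_CONSISTENCY,
++ *           .ipi = RING_BUFFER_IPI_BARRIER,
++ *           .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
++ *   };
++ */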
++
++/*
++ * ring buffer context
++ *
++ * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
++ * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
++ * lib_ring_buffer_write().
++ */
++struct lib_ring_buffer_ctx {
++ /* input received by lib_ring_buffer_reserve(), saved here. */
++ struct channel *chan; /* channel */
++ void *priv; /* client private data */
++ size_t data_size; /* size of payload */
++ int largest_align; /*
++ * alignment of the largest element
++ * in the payload
++ */
++ int cpu; /* processor id */
++
++ /* output from lib_ring_buffer_reserve() */
++ struct lib_ring_buffer *buf; /*
++ * buffer corresponding to processor id
++ * for this channel
++ */
++ size_t slot_size; /* size of the reserved slot */
++ unsigned long buf_offset; /* offset following the record header */
++ unsigned long pre_offset; /*
++ * Initial offset position _before_
++ * the record is written. Positioned
++ * prior to record header alignment
++ * padding.
++ */
++ u64 tsc; /* time-stamp counter value */
++ unsigned int rflags; /* reservation flags */
++};
++
++/**
++ * lib_ring_buffer_ctx_init - initialize ring buffer context
++ * @ctx: ring buffer context to initialize
++ * @chan: channel
++ * @priv: client private data
++ * @data_size: size of record data payload. It must be greater than 0.
++ * @largest_align: largest alignment within data payload types
++ * @cpu: processor id
++ */
++static inline
++void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
++ struct channel *chan, void *priv,
++ size_t data_size, int largest_align,
++ int cpu)
++{
++ ctx->chan = chan;
++ ctx->priv = priv;
++ ctx->data_size = data_size;
++ ctx->largest_align = largest_align;
++ ctx->cpu = cpu;
++ ctx->rflags = 0;
++}
++
++/*
++ * Reservation flags.
++ *
++ * RING_BUFFER_RFLAG_FULL_TSC
++ *
++ * This flag is passed to record_header_size() and to the primitive used to
++ * write the record header. It indicates that the full 64-bit time value is
++ * needed in the record header. If this flag is not set, the record header only
++ * needs to contain "tsc_bits" bits of time value.
++ *
++ * Reservation flags can be added by the client, starting from
++ * "(RING_BUFFER_RFLAG_END << 0)". It can be used to pass information from
++ * record_header_size() to lib_ring_buffer_write_record_header().
++ */
++#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
++#define RING_BUFFER_RFLAG_END (1U << 1)
++
++#ifndef LTTNG_TRACER_CORE_H
++#error "lttng-tracer-core.h is needed for RING_BUFFER_ALIGN define"
++#endif
++
++/*
++ * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
++ * compile-time. We have to duplicate the "config->align" information and the
++ * definition here because config->align is used both in the slow and fast
++ * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
++ */
++#ifdef RING_BUFFER_ALIGN
++
++# define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
++
++/*
++ * Calculate the offset needed to align the type.
++ * size_of_type must be non-zero.
++ */
++static inline
++unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
++{
++ return offset_align(align_drift, size_of_type);
++}
++
++#else
++
++# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
++
++/*
++ * Calculate the offset needed to align the type.
++ * size_of_type must be non-zero.
++ */
++static inline
++unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
++{
++ return 0;
++}
++
++#endif
++
++/**
++ * lib_ring_buffer_align_ctx - Align context offset on "alignment"
++ * @ctx: ring buffer context.
++ */
++static inline
++void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
++ size_t alignment)
++{
++ ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
++ alignment);
++}
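++
++/*
++ * Example (sketch): with RING_BUFFER_ALIGN defined, a context whose
++ * buf_offset is 13 and a requested alignment of 8 gets 3 bytes of padding,
++ * moving buf_offset to 16. Without RING_BUFFER_ALIGN, packed layout is used
++ * and this call is a no-op.
++ */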
++
++/*
++ * lib_ring_buffer_check_config() returns 0 on success.
++ * Used internally to check for valid configurations at channel creation.
++ */
++static inline
++int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
++ unsigned int switch_timer_interval,
++ unsigned int read_timer_interval)
++{
++ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
++ && config->sync == RING_BUFFER_SYNC_PER_CPU
++ && switch_timer_interval)
++ return -EINVAL;
++ return 0;
++}
++
++#include "../../wrapper/ringbuffer/vatomic.h"
++
++#endif /* _LIB_RING_BUFFER_CONFIG_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/frontend.h
+@@ -0,0 +1,240 @@
++#ifndef _LIB_RING_BUFFER_FRONTEND_H
++#define _LIB_RING_BUFFER_FRONTEND_H
++
++/*
++ * lib/ringbuffer/frontend.h
++ *
++ * Ring Buffer Library Synchronization Header (API).
++ *
++ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Author:
++ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * See ring_buffer_frontend.c for more information on wait-free algorithms.
++ */
++
++#include <linux/pipe_fs_i.h>
++#include <linux/rcupdate.h>
++#include <linux/cpumask.h>
++#include <linux/module.h>
++#include <linux/bitops.h>
++#include <linux/splice.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/sched.h>
++#include <linux/cache.h>
++#include <linux/time.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/stat.h>
++#include <linux/cpu.h>
++#include <linux/fs.h>
++
++#include <asm/atomic.h>
++#include <asm/local.h>
++
++/* Internal helpers */
++#include "../../wrapper/ringbuffer/frontend_internal.h"
++
++/* Buffer creation/removal and setup operations */
++
++/*
++ * switch_timer_interval is the time interval (in us) to fill sub-buffers with
++ * padding to let readers get those sub-buffers. Used for live streaming.
++ *
++ * read_timer_interval is the time interval (in us) to wake up pending readers.
++ *
++ * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
++ * address mapping. It is used only by the RING_BUFFER_STATIC configuration. It
++ * can be set to NULL for other backends.
++ */
++
++extern
++struct channel *channel_create(const struct lib_ring_buffer_config *config,
++ const char *name, void *priv,
++ void *buf_addr,
++ size_t subbuf_size, size_t num_subbuf,
++ unsigned int switch_timer_interval,
++ unsigned int read_timer_interval);
++
++/*
++ * channel_destroy returns the private data pointer. It finalizes all channel's
++ * buffers, waits for readers to release all references, and destroys the
++ * channel.
++ */
++extern
++void *channel_destroy(struct channel *chan);
++
++
++/* Buffer read operations */
++
++/*
++ * Iteration on channel cpumask needs to issue a read barrier to match the write
++ * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
++ * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
++ * only performed at channel destruction.
++ */
++#define for_each_channel_cpu(cpu, chan) \
++ for ((cpu) = -1; \
++ ({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask); \
++ smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)
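++
++/*
++ * Illustrative use (sketch):
++ *
++ *   for_each_channel_cpu(cpu, chan) {
++ *           struct lib_ring_buffer *buf =
++ *                   channel_get_ring_buffer(config, chan, cpu);
++ *           ...
++ *   }
++ */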
++
++extern struct lib_ring_buffer *channel_get_ring_buffer(
++ const struct lib_ring_buffer_config *config,
++ struct channel *chan, int cpu);
++extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
++extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
++
++/*
++ * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
++ */
++extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
++ unsigned long *consumed,
++ unsigned long *produced);
++extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
++ unsigned long consumed_new);
++
++extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
++ unsigned long consumed);
++extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
++
++/*
++ * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
++ * to read sub-buffers sequentially.
++ */
++static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
++{
++ int ret;
++
++ ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
++ &buf->prod_snapshot);
++ if (ret)
++ return ret;
++ ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
++ return ret;
++}
++
++static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
++{
++ lib_ring_buffer_put_subbuf(buf);
++ lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
++ buf->backend.chan));
++}
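++
++/*
++ * Typical sequential read loop (sketch; error handling and data extraction
++ * are client-specific):
++ *
++ *   while (!lib_ring_buffer_get_next_subbuf(buf)) {
++ *           ... read the subbuffer contents through the backend ...
++ *           lib_ring_buffer_put_next_subbuf(buf);
++ *   }
++ */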
++
++extern void channel_reset(struct channel *chan);
++extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
++
++static inline
++unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ return v_read(config, &buf->offset);
++}
++
++static inline
++unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ return atomic_long_read(&buf->consumed);
++}
++
++/*
++ * Must call lib_ring_buffer_is_finalized before reading counters (memory
++ * ordering enforced with respect to trace teardown).
++ */
++static inline
++int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ int finalized = ACCESS_ONCE(buf->finalized);
++ /*
++ * Read finalized before counters.
++ */
++ smp_rmb();
++ return finalized;
++}
++
++static inline
++int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
++{
++ return chan->finalized;
++}
++
++static inline
++int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
++{
++ return atomic_read(&chan->record_disabled);
++}
++
++static inline
++unsigned long lib_ring_buffer_get_read_data_size(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ return subbuffer_get_read_data_size(config, &buf->backend);
++}
++
++static inline
++unsigned long lib_ring_buffer_get_records_count(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ return v_read(config, &buf->records_count);
++}
++
++static inline
++unsigned long lib_ring_buffer_get_records_overrun(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ return v_read(config, &buf->records_overrun);
++}
++
++static inline
++unsigned long lib_ring_buffer_get_records_lost_full(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ return v_read(config, &buf->records_lost_full);
++}
++
++static inline
++unsigned long lib_ring_buffer_get_records_lost_wrap(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ return v_read(config, &buf->records_lost_wrap);
++}
++
++static inline
++unsigned long lib_ring_buffer_get_records_lost_big(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ return v_read(config, &buf->records_lost_big);
++}
++
++static inline
++unsigned long lib_ring_buffer_get_records_read(
++ const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ return v_read(config, &buf->backend.records_read);
++}
++
++#endif /* _LIB_RING_BUFFER_FRONTEND_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/frontend_api.h
+@@ -0,0 +1,371 @@
++#ifndef _LIB_RING_BUFFER_FRONTEND_API_H
++#define _LIB_RING_BUFFER_FRONTEND_API_H
++
++/*
++ * lib/ringbuffer/frontend_api.h
++ *
++ * Ring Buffer Library Synchronization Header (buffer write API).
++ *
++ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Author:
++ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * See ring_buffer_frontend.c for more information on wait-free algorithms.
++ * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
++ */
++
++#include "../../wrapper/ringbuffer/frontend.h"
++#include <linux/errno.h>
++#include <linux/prefetch.h>
++
++/**
++ * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
++ *
++ * Disables preemption (acts as an RCU read-side critical section) and keeps a
++ * ring buffer nesting count as a supplementary safety net to ensure tracer
++ * client code will never trigger an endless recursion. Returns the processor
++ * ID on success, -EPERM on failure (nesting count too high).
++ *
++ * The compiler barrier() with "memory" clobber prevents the compiler from
++ * moving instructions out of the ring buffer nesting count update. This is
++ * required to ensure that probe side-effects which can cause recursion (e.g.
++ * unforeseen traps, divisions by 0, ...) are triggered within the incremented
++ * nesting count section.
++ */
++static inline
++int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
++{
++ int cpu, nesting;
++
++ rcu_read_lock_sched_notrace();
++ cpu = smp_processor_id();
++ nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
++ barrier();
++
++ if (unlikely(nesting > 4)) {
++ WARN_ON_ONCE(1);
++ per_cpu(lib_ring_buffer_nesting, cpu)--;
++ rcu_read_unlock_sched_notrace();
++ return -EPERM;
++ } else
++ return cpu;
++}
++
++/**
++ * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
++ */
++static inline
++void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
++{
++ barrier();
++ __get_cpu_var(lib_ring_buffer_nesting)--;
++ rcu_read_unlock_sched_notrace();
++}
++
++/*
++ * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
++ * part of the API per se.
++ *
++ * Returns 0 if the reserve succeeded, or 1 if the slow path must be taken.
++ */
++static inline
++int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_ctx *ctx,
++ unsigned long *o_begin, unsigned long *o_end,
++ unsigned long *o_old, size_t *before_hdr_pad)
++{
++ struct channel *chan = ctx->chan;
++ struct lib_ring_buffer *buf = ctx->buf;
++ *o_begin = v_read(config, &buf->offset);
++ *o_old = *o_begin;
++
++ ctx->tsc = lib_ring_buffer_clock_read(chan);
++ if ((int64_t) ctx->tsc == -EIO)
++ return 1;
++
++ /*
++ * Prefetch cacheline for read because we have to read the previous
++ * commit counter to increment it and commit seq value to compare it to
++ * the commit counter.
++ */
++ prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
++
++ if (last_tsc_overflow(config, buf, ctx->tsc))
++ ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
++
++ if (unlikely(subbuf_offset(*o_begin, chan) == 0))
++ return 1;
++
++ ctx->slot_size = record_header_size(config, chan, *o_begin,
++ before_hdr_pad, ctx);
++ ctx->slot_size +=
++ lib_ring_buffer_align(*o_begin + ctx->slot_size,
++ ctx->largest_align) + ctx->data_size;
++ if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
++ > chan->backend.subbuf_size))
++ return 1;
++
++ /*
++ * Record fits in the current buffer and we are not on a switch
++ * boundary. It's safe to write.
++ */
++ *o_end = *o_begin + ctx->slot_size;
++
++ if (unlikely((subbuf_offset(*o_end, chan)) == 0))
++ /*
++ * The offset_end will fall at the very beginning of the next
++ * subbuffer.
++ */
++ return 1;
++
++ return 0;
++}
++
++/**
++ * lib_ring_buffer_reserve - Reserve space in a ring buffer.
++ * @config: ring buffer instance configuration.
++ * @ctx: ring buffer context. (input and output) Must be already initialized.
++ *
++ * Atomic wait-free slot reservation. The reserved space starts at the context
++ * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
++ *
++ * Return :
++ * 0 on success.
++ * -EAGAIN if channel is disabled.
++ * -ENOSPC if event size is too large for packet.
++ * -ENOBUFS if there is currently not enough space in buffer for the event.
++ * -EIO if data cannot be written into the buffer for any other reason.
++ */
++
++static inline
++int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_ctx *ctx)
++{
++ struct channel *chan = ctx->chan;
++ struct lib_ring_buffer *buf;
++ unsigned long o_begin, o_end, o_old;
++ size_t before_hdr_pad = 0;
++
++ if (atomic_read(&chan->record_disabled))
++ return -EAGAIN;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
++ buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
++ else
++ buf = chan->backend.buf;
++ if (atomic_read(&buf->record_disabled))
++ return -EAGAIN;
++ ctx->buf = buf;
++
++ /*
++ * Perform retryable operations.
++ */
++ if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
++ &o_end, &o_old, &before_hdr_pad)))
++ goto slow_path;
++
++ if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
++ != o_old))
++ goto slow_path;
++
++ /*
++ * Atomically update last_tsc. This update races against concurrent
++ * atomic updates, but the race will always cause supplementary full TSC
++ * record headers, never the opposite (missing a full TSC record header
++ * when it would be needed).
++ */
++ save_last_tsc(config, ctx->buf, ctx->tsc);
++
++ /*
++ * Push the reader if necessary
++ */
++ lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);
++
++ /*
++ * Clear noref flag for this subbuffer.
++ */
++ lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
++ subbuf_index(o_end - 1, chan));
++
++ ctx->pre_offset = o_begin;
++ ctx->buf_offset = o_begin + before_hdr_pad;
++ return 0;
++slow_path:
++ return lib_ring_buffer_reserve_slow(ctx);
++}
++
++/**
++ * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
++ * @config: ring buffer instance configuration.
++ * @buf: buffer
++ * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
++ *
++ * This operation is completely reentrant: it can be called while tracing is
++ * active with absolutely no lock held.
++ *
++ * Note, however, that because a v_cmpxchg is used for some atomic operations
++ * and must be executed locally for per-CPU buffers, this function must be
++ * called from the CPU which owns the buffer for an ACTIVE flush, with
++ * preemption disabled, in the RING_BUFFER_SYNC_PER_CPU configuration.
++ */
++static inline
++void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf, enum switch_mode mode)
++{
++ lib_ring_buffer_switch_slow(buf, mode);
++}
++
++/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
++
++/**
++ * lib_ring_buffer_commit - Commit a record.
++ * @config: ring buffer instance configuration.
++ * @ctx: ring buffer context. (input arguments only)
++ *
++ * Atomic unordered slot commit. Increments the commit count in the
++ * specified sub-buffer, and delivers it if necessary.
++ */
++static inline
++void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
++ const struct lib_ring_buffer_ctx *ctx)
++{
++ struct channel *chan = ctx->chan;
++ struct lib_ring_buffer *buf = ctx->buf;
++ unsigned long offset_end = ctx->buf_offset;
++ unsigned long endidx = subbuf_index(offset_end - 1, chan);
++ unsigned long commit_count;
++
++ /*
++ * Must count record before incrementing the commit count.
++ */
++ subbuffer_count_record(config, &buf->backend, endidx);
++
++ /*
++ * Order all writes to buffer before the commit count update that will
++ * determine that the subbuffer is full.
++ */
++ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
++ /*
++ * Must write slot data before incrementing commit count. This
++ * compiler barrier is upgraded into a smp_mb() by the IPI sent
++ * by get_subbuf().
++ */
++ barrier();
++ } else
++ smp_wmb();
++
++ v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);
++
++ /*
++ * commit count read can race with concurrent OOO commit count updates.
++ * This is only needed for lib_ring_buffer_check_deliver (for
++ * non-polling delivery only) and for
++ * lib_ring_buffer_write_commit_counter. The race can only cause the
++ * counter to be read with the same value more than once, which could
++ * cause:
++ * - Multiple delivery for the same sub-buffer (which is handled
++ * gracefully by the reader code) if the value is for a full
++ * sub-buffer. It's important that we can never miss a sub-buffer
++ * delivery. Re-reading the value after the v_add ensures this.
++ * - Reading a commit_count with a higher value than what was actually
++ * added to it for the lib_ring_buffer_write_commit_counter call
++ * (again caused by a concurrent committer). It does not matter,
++ * because this function is interested in the fact that the commit
++ * count reaches back the reserve offset for a specific sub-buffer,
++ * which is completely independent of the order.
++ */
++ commit_count = v_read(config, &buf->commit_hot[endidx].cc);
++
++ lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
++ commit_count, endidx, ctx->tsc);
++ /*
++ * Update used size at each commit. It's needed only for extracting
++ * ring_buffer buffers from vmcore, after crash.
++ */
++ lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
++ ctx->buf_offset, commit_count,
++ ctx->slot_size);
++}
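++
++/*
++ * Typical reserve/commit write path (sketch; "client_config", "chan",
++ * "client_priv", "payload" and "payload_align" are placeholders):
++ *
++ *   int cpu = lib_ring_buffer_get_cpu(&client_config);
++ *   if (cpu < 0)
++ *           return;
++ *   lib_ring_buffer_ctx_init(&ctx, chan, client_priv, sizeof(payload),
++ *                            payload_align, cpu);
++ *   if (lib_ring_buffer_reserve(&client_config, &ctx))
++ *           goto put;
++ *   ... write the record header and payload at ctx.buf_offset ...
++ *   lib_ring_buffer_commit(&client_config, &ctx);
++ * put:
++ *   lib_ring_buffer_put_cpu(&client_config);
++ */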
++
++/**
++ * lib_ring_buffer_try_discard_reserve - Try discarding a record.
++ * @config: ring buffer instance configuration.
++ * @ctx: ring buffer context. (input arguments only)
++ *
++ * Only succeeds if no other record has been written after the record to
++ * discard. If discard fails, the record must be committed to the buffer.
++ *
++ * Returns 0 upon success, -EPERM if the record cannot be discarded.
++ */
++static inline
++int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
++ const struct lib_ring_buffer_ctx *ctx)
++{
++ struct lib_ring_buffer *buf = ctx->buf;
++ unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
++
++ /*
++ * We need to ensure that if the cmpxchg succeeds and discards the
++ * record, the next record will record a full TSC, because it cannot
++ * rely on the last_tsc associated with the discarded record to detect
++ * overflows. The only way to ensure this is to set the last_tsc to 0
++ * (assuming no 64-bit TSC overflow), which forces a full 64-bit
++ * timestamp to be written in the next record.
++ *
++ * Note: if discard fails, we must leave the TSC in the record header.
++ * It is needed to keep track of TSC overflows for the following
++ * records.
++ */
++ save_last_tsc(config, buf, 0ULL);
++
++ if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
++ != end_offset))
++ return -EPERM;
++ else
++ return 0;
++}
++
++static inline
++void channel_record_disable(const struct lib_ring_buffer_config *config,
++ struct channel *chan)
++{
++ atomic_inc(&chan->record_disabled);
++}
++
++static inline
++void channel_record_enable(const struct lib_ring_buffer_config *config,
++ struct channel *chan)
++{
++ atomic_dec(&chan->record_disabled);
++}
++
++static inline
++void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ atomic_inc(&buf->record_disabled);
++}
++
++static inline
++void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf)
++{
++ atomic_dec(&buf->record_disabled);
++}
++
++#endif /* _LIB_RING_BUFFER_FRONTEND_API_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/frontend_internal.h
+@@ -0,0 +1,456 @@
++#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
++#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
++
++/*
++ * lib/ringbuffer/frontend_internal.h
++ *
++ * Ring Buffer Library Synchronization Header (internal helpers).
++ *
++ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Author:
++ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * See ring_buffer_frontend.c for more information on wait-free algorithms.
++ */
++
++#include "../../wrapper/ringbuffer/config.h"
++#include "../../wrapper/ringbuffer/backend_types.h"
++#include "../../wrapper/ringbuffer/frontend_types.h"
++#include "../../lib/prio_heap/lttng_prio_heap.h" /* For per-CPU read-side iterator */
++
++/* Buffer offset macros */
++
++/* buf_trunc mask selects only the buffer number. */
++static inline
++unsigned long buf_trunc(unsigned long offset, struct channel *chan)
++{
++ return offset & ~(chan->backend.buf_size - 1);
++
++}
++
++/* Select the buffer number value (counter). */
++static inline
++unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
++{
++ return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
++}
++
++/* buf_offset mask selects only the offset within the current buffer. */
++static inline
++unsigned long buf_offset(unsigned long offset, struct channel *chan)
++{
++ return offset & (chan->backend.buf_size - 1);
++}
++
++/* subbuf_offset mask selects the offset within the current subbuffer. */
++static inline
++unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
++{
++ return offset & (chan->backend.subbuf_size - 1);
++}
++
++/* subbuf_trunc mask selects the subbuffer number. */
++static inline
++unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
++{
++ return offset & ~(chan->backend.subbuf_size - 1);
++}
++
++/* subbuf_align aligns the offset to the next subbuffer. */
++static inline
++unsigned long subbuf_align(unsigned long offset, struct channel *chan)
++{
++ return (offset + chan->backend.subbuf_size)
++ & ~(chan->backend.subbuf_size - 1);
++}
++
++/* subbuf_index returns the index of the current subbuffer within the buffer. */
++static inline
++unsigned long subbuf_index(unsigned long offset, struct channel *chan)
++{
++ return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
++}
++
++/*
++ * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
++ * bits from the last TSC read. When overflows are detected, the full 64-bit
++ * timestamp counter should be written in the record header. Reads and writes
++ * last_tsc atomically.
++ */
++
++#if (BITS_PER_LONG == 32)
++static inline
++void save_last_tsc(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf, u64 tsc)
++{
++ if (config->tsc_bits == 0 || config->tsc_bits == 64)
++ return;
++
++ /*
++ * Ensure the compiler performs this update in a single instruction.
++ */
++ v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
++}
++
++static inline
++int last_tsc_overflow(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf, u64 tsc)
++{
++ unsigned long tsc_shifted;
++
++ if (config->tsc_bits == 0 || config->tsc_bits == 64)
++ return 0;
++
++ tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
++ if (unlikely(tsc_shifted
++ - (unsigned long)v_read(config, &buf->last_tsc)))
++ return 1;
++ else
++ return 0;
++}
++#else
++static inline
++void save_last_tsc(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf, u64 tsc)
++{
++ if (config->tsc_bits == 0 || config->tsc_bits == 64)
++ return;
++
++ v_set(config, &buf->last_tsc, (unsigned long)tsc);
++}
++
++static inline
++int last_tsc_overflow(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf, u64 tsc)
++{
++ if (config->tsc_bits == 0 || config->tsc_bits == 64)
++ return 0;
++
++ if (unlikely((tsc - v_read(config, &buf->last_tsc))
++ >> config->tsc_bits))
++ return 1;
++ else
++ return 0;
++}
++#endif
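++
++/*
++ * Example: with a (hypothetical) client setting of tsc_bits == 27, a record
++ * header only needs to carry the low 27 timestamp bits; the full 64-bit
++ * timestamp (RING_BUFFER_RFLAG_FULL_TSC) is requested whenever the clock has
++ * advanced beyond what those 27 bits can unambiguously encode since the last
++ * record written to the buffer.
++ */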
++
++extern
++int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
++
++extern
++void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
++ enum switch_mode mode);
++
++extern
++void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
++
++/* Buffer write helpers */
++
++static inline
++void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
++ struct channel *chan,
++ unsigned long offset)
++{
++ unsigned long consumed_old, consumed_new;
++
++ do {
++ consumed_old = atomic_long_read(&buf->consumed);
++ /*
++ * If buffer is in overwrite mode, push the reader consumed
++ * count if the write position has reached it and we are not
++ * at the first iteration (don't push the reader farther than
++ * the writer). This operation can be done concurrently by many
++ * writers in the same buffer; the writer at the farthest write
++ * position sub-buffer index in the buffer is the one which will
++ * win this loop.
++ */
++ if (unlikely(subbuf_trunc(offset, chan)
++ - subbuf_trunc(consumed_old, chan)
++ >= chan->backend.buf_size))
++ consumed_new = subbuf_align(consumed_old, chan);
++ else
++ return;
++ } while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
++ consumed_new) != consumed_old));
++}
++
++static inline
++void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf,
++ unsigned long commit_count,
++ unsigned long idx)
++{
++ if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
++ v_set(config, &buf->commit_hot[idx].seq, commit_count);
++}
++
++static inline
++int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf,
++ struct channel *chan)
++{
++ unsigned long consumed_old, consumed_idx, commit_count, write_offset;
++
++ consumed_old = atomic_long_read(&buf->consumed);
++ consumed_idx = subbuf_index(consumed_old, chan);
++ commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
++ /*
++ * No memory barrier here, since we are only interested
++ * get the data if we are racing. The mb() that ensures correct
++ * get the data is we are racing. The mb() that ensures correct
++ * memory order is in get_subbuf.
++ */
++ write_offset = v_read(config, &buf->offset);
++
++ /*
++ * Check that the subbuffer we are trying to consume has been
++ * already fully committed.
++ */
++
++ if (((commit_count - chan->backend.subbuf_size)
++ & chan->commit_count_mask)
++ - (buf_trunc(consumed_old, chan)
++ >> chan->backend.num_subbuf_order)
++ != 0)
++ return 0;
++
++ /*
++ * Check that we are not about to read the same subbuffer in
++ * which the writer head is.
++ */
++ if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
++ == 0)
++ return 0;
++
++ return 1;
++
++}
++
++static inline
++int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf,
++ struct channel *chan)
++{
++ return !!subbuf_offset(v_read(config, &buf->offset), chan);
++}
++
++static inline
++unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf,
++ unsigned long idx)
++{
++ return subbuffer_get_data_size(config, &buf->backend, idx);
++}
++
++/*
++ * Check if all space reservations in a buffer have been committed. This helps
++ * determine whether an execution context is nested (for per-cpu buffers only).
++ * This is a very specific ftrace use-case, so we keep this as an "internal" API.
++ */
++static inline
++int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf,
++ struct channel *chan)
++{
++ unsigned long offset, idx, commit_count;
++
++ CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
++ CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
++
++ /*
++ * Read offset and commit count in a loop so they are both read
++ * atomically wrt interrupts. We deal with interrupt concurrency by
++ * restarting both reads if the offset has been pushed. Note that given
++ * we only have to deal with interrupt concurrency here, an interrupt
++ * modifying the commit count will also modify "offset", so it is safe
++ * to only check for offset modifications.
++ */
++ do {
++ offset = v_read(config, &buf->offset);
++ idx = subbuf_index(offset, chan);
++ commit_count = v_read(config, &buf->commit_hot[idx].cc);
++ } while (offset != v_read(config, &buf->offset));
++
++ return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
++ - (commit_count & chan->commit_count_mask) == 0);
++}
++
++/*
++ * Receive end of subbuffer TSC as parameter. It has been read in the
++ * space reservation loop of either reserve or switch, which ensures it
++ * progresses monotonically with event records in the buffer. Therefore,
++ * it ensures that the end timestamp of a subbuffer is <= begin
++ * timestamp of the following subbuffers.
++ */
++static inline
++void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf,
++ struct channel *chan,
++ unsigned long offset,
++ unsigned long commit_count,
++ unsigned long idx,
++ u64 tsc)
++{
++ unsigned long old_commit_count = commit_count
++ - chan->backend.subbuf_size;
++
++ /* Check if all commits have been done */
++ if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
++ - (old_commit_count & chan->commit_count_mask) == 0)) {
++ /*
++ * If we succeeded at updating cc_sb below, we are the subbuffer
++ * writer delivering the subbuffer. This deals with concurrent
++ * updates of the "cc" value without adding an add_return atomic
++ * operation to the fast path.
++ *
++ * We are doing the delivery in two steps:
++ * - First, we cmpxchg() cc_sb to the new value
++ * old_commit_count + 1. This ensures that we are the only
++ * subbuffer user successfully filling the subbuffer, but we
++ * do _not_ set the cc_sb value to "commit_count" yet.
++ * Therefore, other writers that would wrap around the ring
++ * buffer and try to start writing to our subbuffer would
++ * have to drop records, because it would appear as
++ * non-filled.
++ * We therefore have exclusive access to the subbuffer control
++ * structures. This mutual exclusion with other writers is
++ * crucially important to perform record overruns count in
++ * flight recorder mode locklessly.
++ * - When we are ready to release the subbuffer (either for
++ * reading or for overrun by other writers), we simply set the
++ * cc_sb value to "commit_count" and perform delivery.
++ *
++ * The subbuffer size is at least 2 bytes (minimum size: 1 page).
++ * This guarantees that old_commit_count + 1 != commit_count.
++ */
++
++ /*
++ * Order prior updates to reserve count prior to the
++ * commit_cold cc_sb update.
++ */
++ smp_wmb();
++ if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
++ old_commit_count, old_commit_count + 1)
++ == old_commit_count)) {
++ /*
++ * Start of exclusive subbuffer access. We are
++ * guaranteed to be the last writer in this subbuffer
++ * and any other writer trying to access this subbuffer
++ * in this state is required to drop records.
++ */
++ v_add(config,
++ subbuffer_get_records_count(config,
++ &buf->backend, idx),
++ &buf->records_count);
++ v_add(config,
++ subbuffer_count_records_overrun(config,
++ &buf->backend,
++ idx),
++ &buf->records_overrun);
++ config->cb.buffer_end(buf, tsc, idx,
++ lib_ring_buffer_get_data_size(config,
++ buf,
++ idx));
++
++ /*
++ * Set noref flag and offset for this subbuffer id.
++ * Contains a memory barrier that ensures counter stores
++ * are ordered before set noref and offset.
++ */
++ lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
++ buf_trunc_val(offset, chan));
++
++ /*
++ * Order set_noref and record counter updates before the
++ * end of subbuffer exclusive access. Orders with
++ * respect to writers coming into the subbuffer after
++ * wrap around, and also order wrt concurrent readers.
++ */
++ smp_mb();
++ /* End of exclusive subbuffer access */
++ v_set(config, &buf->commit_cold[idx].cc_sb,
++ commit_count);
++ /*
++ * Order later updates to reserve count after
++ * the commit_cold cc_sb update.
++ */
++ smp_wmb();
++ lib_ring_buffer_vmcore_check_deliver(config, buf,
++ commit_count, idx);
++
++ /*
++ * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
++ */
++ if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
++ && atomic_long_read(&buf->active_readers)
++ && lib_ring_buffer_poll_deliver(config, buf, chan)) {
++ wake_up_interruptible(&buf->read_wait);
++ wake_up_interruptible(&chan->read_wait);
++ }
++
++ }
++ }
++}
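++
++/*
++ * Illustration of the cc_sb hand-off above (informative only; the values
++ * assume a 4096-byte sub-buffer that has just been completely committed,
++ * so commit_count == old_commit_count + 4096):
++ *
++ *   cc_sb == old_commit_count       sub-buffer still being filled
++ *   cc_sb == old_commit_count + 1   delivery in progress: the delivering
++ *                                   writer holds exclusive access, other
++ *                                   writers wrapping around must drop
++ *                                   records
++ *   cc_sb == commit_count           sub-buffer delivered, available for
++ *                                   reading or for overwrite
++ */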
++
++/*
++ * lib_ring_buffer_write_commit_counter
++ *
++ * For flight recording. Must be called after commit.
++ * This function increments the subbuffer's commit_seq counter each time the
++ * commit count catches up with the reserve offset (modulo subbuffer size).
++ * It is useful for crash dumps.
++ */
++static inline
++void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer *buf,
++ struct channel *chan,
++ unsigned long idx,
++ unsigned long buf_offset,
++ unsigned long commit_count,
++ size_t slot_size)
++{
++ unsigned long offset, commit_seq_old;
++
++ if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
++ return;
++
++ offset = buf_offset + slot_size;
++
++ /*
++ * subbuf_offset includes commit_count_mask. We can simply
++ * compare the offsets within the subbuffer without caring about
++ * buffer full/empty mismatch because offset is never zero here
++ * (subbuffer header and record headers have non-zero length).
++ */
++ if (unlikely(subbuf_offset(offset - commit_count, chan)))
++ return;
++
++ commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
++ while ((long) (commit_seq_old - commit_count) < 0)
++ commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
++ commit_seq_old, commit_count);
++}
++
++extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
++ struct channel_backend *chanb, int cpu);
++extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
++
++/* Keep track of trap nesting inside ring buffer code */
++DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
++
++#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
+@@ -0,0 +1,188 @@
++#ifndef _LIB_RING_BUFFER_FRONTEND_TYPES_H
++#define _LIB_RING_BUFFER_FRONTEND_TYPES_H
++
++/*
++ * lib/ringbuffer/frontend_types.h
++ *
++ * Ring Buffer Library Synchronization Header (types).
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Author:
++ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * See ring_buffer_frontend.c for more information on wait-free algorithms.
++ */
++
++#include <linux/kref.h>
++#include "../../wrapper/ringbuffer/config.h"
++#include "../../wrapper/ringbuffer/backend_types.h"
++#include "../../wrapper/spinlock.h"
++#include "../../lib/prio_heap/lttng_prio_heap.h" /* For per-CPU read-side iterator */
++
++/*
++ * A switch is done during tracing or as a final flush after tracing (so it
++ * won't write in the new sub-buffer).
++ */
++enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
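++
++/*
++ * For example, the periodic switch timer and the CPU hotplug CPU_DEAD
++ * callback in ring_buffer_frontend.c use SWITCH_ACTIVE, while a final
++ * flush at teardown would use SWITCH_FLUSH so that nothing gets written
++ * into the newly opened sub-buffer.
++ */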
++
++/* channel-level read-side iterator */
++struct channel_iter {
++ /* Prio heap of buffers. Lowest timestamps at the top. */
++ struct lttng_ptr_heap heap; /* Heap of struct lib_ring_buffer ptrs */
++ struct list_head empty_head; /* Empty buffers linked-list head */
++ int read_open; /* Opened for reading ? */
++ u64 last_qs; /* Last quiescent state timestamp */
++ u64 last_timestamp; /* Last timestamp (for WARN_ON) */
++ int last_cpu; /* Last timestamp cpu */
++ /*
++ * read() file operation state.
++ */
++ unsigned long len_left;
++};
++
++/* channel: collection of per-cpu ring buffers. */
++struct channel {
++ atomic_t record_disabled;
++ unsigned long commit_count_mask; /*
++ * Commit count mask, removing
++ * the MSBs corresponding to
++ * bits used to represent the
++ * subbuffer index.
++ */
++
++ struct channel_backend backend; /* Associated backend */
++
++ unsigned long switch_timer_interval; /* Buffer flush (jiffies) */
++ unsigned long read_timer_interval; /* Reader wakeup (jiffies) */
++ struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
++ struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
++ struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */
++ unsigned int cpu_hp_enable:1; /* Enable CPU hotplug notif. */
++ unsigned int hp_iter_enable:1; /* Enable hp iter notif. */
++ wait_queue_head_t read_wait; /* reader wait queue */
++ wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
++ int finalized; /* Has channel been finalized */
++ struct channel_iter iter; /* Channel read-side iterator */
++ struct kref ref; /* Reference count */
++};
++
++/* Per-subbuffer commit counters used on the hot path */
++struct commit_counters_hot {
++ union v_atomic cc; /* Commit counter */
++ union v_atomic seq; /* Consecutive commits */
++};
++
++/* Per-subbuffer commit counters used only on cold paths */
++struct commit_counters_cold {
++ union v_atomic cc_sb; /* Incremented _once_ at sb switch */
++};
++
++/* Per-buffer read iterator */
++struct lib_ring_buffer_iter {
++ u64 timestamp; /* Current record timestamp */
++ size_t header_len; /* Current record header length */
++ size_t payload_len; /* Current record payload length */
++
++ struct list_head empty_node; /* Linked list of empty buffers */
++ unsigned long consumed, read_offset, data_size;
++ enum {
++ ITER_GET_SUBBUF = 0,
++ ITER_TEST_RECORD,
++ ITER_NEXT_RECORD,
++ ITER_PUT_SUBBUF,
++ } state;
++ unsigned int allocated:1;
++ unsigned int read_open:1; /* Opened for reading ? */
++};
++
++/* ring buffer state */
++struct lib_ring_buffer {
++ /* First 32 bytes cache-hot cacheline */
++ union v_atomic offset; /* Current offset in the buffer */
++ struct commit_counters_hot *commit_hot;
++ /* Commit count per sub-buffer */
++ atomic_long_t consumed; /*
++ * Current offset in the buffer
++ * standard atomic access (shared)
++ */
++ atomic_t record_disabled;
++ /* End of first 32 bytes cacheline */
++ union v_atomic last_tsc; /*
++ * Last timestamp written in the buffer.
++ */
++
++ struct lib_ring_buffer_backend backend; /* Associated backend */
++
++ struct commit_counters_cold *commit_cold;
++ /* Commit count per sub-buffer */
++ atomic_long_t active_readers; /*
++ * Active readers count
++ * standard atomic access (shared)
++ */
++ /* Dropped records */
++ union v_atomic records_lost_full; /* Buffer full */
++ union v_atomic records_lost_wrap; /* Nested wrap-around */
++ union v_atomic records_lost_big; /* Events too big */
++ union v_atomic records_count; /* Number of records written */
++ union v_atomic records_overrun; /* Number of overwritten records */
++ wait_queue_head_t read_wait; /* reader buffer-level wait queue */
++ wait_queue_head_t write_wait; /* writer buffer-level wait queue (for metadata only) */
++ int finalized; /* buffer has been finalized */
++ struct timer_list switch_timer; /* timer for periodical switch */
++ struct timer_list read_timer; /* timer for read poll */
++ raw_spinlock_t raw_tick_nohz_spinlock; /* nohz entry lock/trylock */
++ struct lib_ring_buffer_iter iter; /* read-side iterator */
++ unsigned long get_subbuf_consumed; /* Read-side consumed */
++ unsigned long prod_snapshot; /* Producer count snapshot */
++ unsigned long cons_snapshot; /* Consumer count snapshot */
++ unsigned int get_subbuf:1, /* Sub-buffer being held by reader */
++ switch_timer_enabled:1, /* Protected by ring_buffer_nohz_lock */
++ read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
++};
++
++static inline
++void *channel_get_private(struct channel *chan)
++{
++ return chan->backend.priv;
++}
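++
++/*
++ * For instance, a client retrieves the private data pointer it passed to
++ * channel_backend_init() with:
++ *
++ *	struct my_client_priv *priv = channel_get_private(chan);
++ *
++ * ("struct my_client_priv" is a purely hypothetical client-side type.)
++ */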
++
++/*
++ * Issue warnings and disable channels upon internal error.
++ * Can receive struct channel or struct channel_backend parameters.
++ */
++#define CHAN_WARN_ON(c, cond) \
++ ({ \
++ struct channel *__chan; \
++ int _____ret = unlikely(cond); \
++ if (_____ret) { \
++ if (__same_type(*(c), struct channel_backend)) \
++ __chan = container_of((void *) (c), \
++ struct channel, \
++ backend); \
++ else if (__same_type(*(c), struct channel)) \
++ __chan = (void *) (c); \
++ else \
++ BUG_ON(1); \
++ atomic_inc(&__chan->record_disabled); \
++ WARN_ON(1); \
++ } \
++ _____ret; \
++ })
++
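++/*
++ * Typical uses, taken from the frontend and backend code and shown here
++ * only as an illustration:
++ *
++ *	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
++ *	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
++ */
++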
++#endif /* _LIB_RING_BUFFER_FRONTEND_TYPES_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/iterator.h
+@@ -0,0 +1,83 @@
++#ifndef _LIB_RING_BUFFER_ITERATOR_H
++#define _LIB_RING_BUFFER_ITERATOR_H
++
++/*
++ * lib/ringbuffer/iterator.h
++ *
++ * Ring buffer and channel iterators.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Author:
++ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ */
++
++#include "../../wrapper/ringbuffer/backend.h"
++#include "../../wrapper/ringbuffer/frontend.h"
++#include "../../wrapper/ringbuffer/vfs.h"
++
++/*
++ * lib_ring_buffer_get_next_record advances the buffer read position to the next
++ * record. It returns either the size of the next record, -EAGAIN if there is
++ * currently no data available, or -ENODATA if no data is available and buffer
++ * is finalized.
++ */
++extern ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
++ struct lib_ring_buffer *buf);
++
++/*
++ * channel_get_next_record advances the buffer read position to the next record.
++ * It returns either the size of the next record, -EAGAIN if there is currently
++ * no data available, or -ENODATA if no data is available and buffer is
++ * finalized.
++ * Returns the current buffer in ret_buf.
++ */
++extern ssize_t channel_get_next_record(struct channel *chan,
++ struct lib_ring_buffer **ret_buf);
++
++/**
++ * read_current_record - copy the buffer current record into dest.
++ * @buf: ring buffer
++ * @dest: destination where the record should be copied
++ *
++ * dest should be large enough to contain the record. Returns the number of
++ * bytes copied.
++ */
++static inline size_t read_current_record(struct lib_ring_buffer *buf, void *dest)
++{
++ return lib_ring_buffer_read(&buf->backend, buf->iter.read_offset,
++ dest, buf->iter.payload_len);
++}
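++
++/*
++ * Informal usage sketch (not part of the API): draining a channel with the
++ * iterator interface. "dest" and "dest_size" stand for caller-provided
++ * storage and are purely illustrative.
++ *
++ *	struct lib_ring_buffer *buf;
++ *	ssize_t len;
++ *
++ *	for (;;) {
++ *		len = channel_get_next_record(chan, &buf);
++ *		if (len == -EAGAIN)
++ *			continue;	(no data yet: retry or wait)
++ *		if (len == -ENODATA || len < 0)
++ *			break;		(finalized and empty, or error)
++ *		if (len <= dest_size)
++ *			read_current_record(buf, dest);
++ *	}
++ */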
++
++extern int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf);
++extern void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf);
++extern int channel_iterator_open(struct channel *chan);
++extern void channel_iterator_release(struct channel *chan);
++
++extern const struct file_operations channel_payload_file_operations;
++extern const struct file_operations lib_ring_buffer_payload_file_operations;
++
++/*
++ * Used internally.
++ */
++int channel_iterator_init(struct channel *chan);
++void channel_iterator_unregister_notifiers(struct channel *chan);
++void channel_iterator_free(struct channel *chan);
++void channel_iterator_reset(struct channel *chan);
++void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf);
++
++#endif /* _LIB_RING_BUFFER_ITERATOR_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/nohz.h
+@@ -0,0 +1,42 @@
++#ifndef _LIB_RING_BUFFER_NOHZ_H
++#define _LIB_RING_BUFFER_NOHZ_H
++
++/*
++ * lib/ringbuffer/nohz.h
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifdef CONFIG_LIB_RING_BUFFER
++void lib_ring_buffer_tick_nohz_flush(void);
++void lib_ring_buffer_tick_nohz_stop(void);
++void lib_ring_buffer_tick_nohz_restart(void);
++#else
++static inline void lib_ring_buffer_tick_nohz_flush(void)
++{
++}
++
++static inline void lib_ring_buffer_tick_nohz_stop(void)
++{
++}
++
++static inline void lib_ring_buffer_tick_nohz_restart(void)
++{
++}
++#endif
++
++#endif /* _LIB_RING_BUFFER_NOHZ_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c
+@@ -0,0 +1,869 @@
++/*
++ * ring_buffer_backend.c
++ *
++ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/stddef.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/bitops.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/slab.h>
++#include <linux/cpu.h>
++#include <linux/mm.h>
++
++#include "../../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
++#include "../../wrapper/ringbuffer/config.h"
++#include "../../wrapper/ringbuffer/backend.h"
++#include "../../wrapper/ringbuffer/frontend.h"
++
++/**
++ * lib_ring_buffer_backend_allocate - allocate a channel buffer
++ * @config: ring buffer instance configuration
++ * @bufb: the buffer backend struct
++ * @size: total size of the buffer
++ * @num_subbuf: number of subbuffers
++ * @extra_reader_sb: need extra subbuffer for reader
++ */
++static
++int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_backend *bufb,
++ size_t size, size_t num_subbuf,
++ int extra_reader_sb)
++{
++ struct channel_backend *chanb = &bufb->chan->backend;
++ unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
++ unsigned long subbuf_size, mmap_offset = 0;
++ unsigned long num_subbuf_alloc;
++ struct page **pages;
++ void **virt;
++ unsigned long i;
++
++ num_pages = size >> PAGE_SHIFT;
++ num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
++ subbuf_size = chanb->subbuf_size;
++ num_subbuf_alloc = num_subbuf;
++
++ if (extra_reader_sb) {
++ num_pages += num_pages_per_subbuf; /* Add pages for reader */
++ num_subbuf_alloc++;
++ }
++
++ pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
++ 1 << INTERNODE_CACHE_SHIFT),
++ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
++ if (unlikely(!pages))
++ goto pages_error;
++
++ virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
++ 1 << INTERNODE_CACHE_SHIFT),
++ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
++ if (unlikely(!virt))
++ goto virt_error;
++
++ bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
++ * num_subbuf_alloc,
++ 1 << INTERNODE_CACHE_SHIFT),
++ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
++ if (unlikely(!bufb->array))
++ goto array_error;
++
++ for (i = 0; i < num_pages; i++) {
++ pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
++ GFP_KERNEL | __GFP_ZERO, 0);
++ if (unlikely(!pages[i]))
++ goto depopulate;
++ virt[i] = page_address(pages[i]);
++ }
++ bufb->num_pages_per_subbuf = num_pages_per_subbuf;
++
++ /* Allocate backend pages array elements */
++ for (i = 0; i < num_subbuf_alloc; i++) {
++ bufb->array[i] =
++ kzalloc_node(ALIGN(
++ sizeof(struct lib_ring_buffer_backend_pages) +
++ sizeof(struct lib_ring_buffer_backend_page)
++ * num_pages_per_subbuf,
++ 1 << INTERNODE_CACHE_SHIFT),
++ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
++ if (!bufb->array[i])
++ goto free_array;
++ }
++
++ /* Allocate write-side subbuffer table */
++ bufb->buf_wsb = kzalloc_node(ALIGN(
++ sizeof(struct lib_ring_buffer_backend_subbuffer)
++ * num_subbuf,
++ 1 << INTERNODE_CACHE_SHIFT),
++ GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
++ if (unlikely(!bufb->buf_wsb))
++ goto free_array;
++
++ for (i = 0; i < num_subbuf; i++)
++ bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
++
++ /* Assign read-side subbuffer table */
++ if (extra_reader_sb)
++ bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
++ num_subbuf_alloc - 1);
++ else
++ bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
++
++ /* Assign pages to page index */
++ for (i = 0; i < num_subbuf_alloc; i++) {
++ for (j = 0; j < num_pages_per_subbuf; j++) {
++ CHAN_WARN_ON(chanb, page_idx > num_pages);
++ bufb->array[i]->p[j].virt = virt[page_idx];
++ bufb->array[i]->p[j].page = pages[page_idx];
++ page_idx++;
++ }
++ if (config->output == RING_BUFFER_MMAP) {
++ bufb->array[i]->mmap_offset = mmap_offset;
++ mmap_offset += subbuf_size;
++ }
++ }
++
++ /*
++ * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
++ * will not fault.
++ */
++ wrapper_vmalloc_sync_all();
++ kfree(virt);
++ kfree(pages);
++ return 0;
++
++free_array:
++ for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
++ kfree(bufb->array[i]);
++depopulate:
++ /* Free all allocated pages */
++ for (i = 0; (i < num_pages && pages[i]); i++)
++ __free_page(pages[i]);
++ kfree(bufb->array);
++array_error:
++ kfree(virt);
++virt_error:
++ kfree(pages);
++pages_error:
++ return -ENOMEM;
++}
++
++int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
++ struct channel_backend *chanb, int cpu)
++{
++ const struct lib_ring_buffer_config *config = &chanb->config;
++
++ bufb->chan = container_of(chanb, struct channel, backend);
++ bufb->cpu = cpu;
++
++ return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
++ chanb->num_subbuf,
++ chanb->extra_reader_sb);
++}
++
++void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
++{
++ struct channel_backend *chanb = &bufb->chan->backend;
++ unsigned long i, j, num_subbuf_alloc;
++
++ num_subbuf_alloc = chanb->num_subbuf;
++ if (chanb->extra_reader_sb)
++ num_subbuf_alloc++;
++
++ kfree(bufb->buf_wsb);
++ for (i = 0; i < num_subbuf_alloc; i++) {
++ for (j = 0; j < bufb->num_pages_per_subbuf; j++)
++ __free_page(bufb->array[i]->p[j].page);
++ kfree(bufb->array[i]);
++ }
++ kfree(bufb->array);
++ bufb->allocated = 0;
++}
++
++void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
++{
++ struct channel_backend *chanb = &bufb->chan->backend;
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ unsigned long num_subbuf_alloc;
++ unsigned int i;
++
++ num_subbuf_alloc = chanb->num_subbuf;
++ if (chanb->extra_reader_sb)
++ num_subbuf_alloc++;
++
++ for (i = 0; i < chanb->num_subbuf; i++)
++ bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
++ if (chanb->extra_reader_sb)
++ bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
++ num_subbuf_alloc - 1);
++ else
++ bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
++
++ for (i = 0; i < num_subbuf_alloc; i++) {
++ /* Don't reset mmap_offset */
++ v_set(config, &bufb->array[i]->records_commit, 0);
++ v_set(config, &bufb->array[i]->records_unread, 0);
++ bufb->array[i]->data_size = 0;
++ /* Don't reset backend page and virt addresses */
++ }
++ /* Don't reset num_pages_per_subbuf, cpu, allocated */
++ v_set(config, &bufb->records_read, 0);
++}
++
++/*
++ * The frontend is responsible for also calling ring_buffer_backend_reset for
++ * each buffer when calling channel_backend_reset.
++ */
++void channel_backend_reset(struct channel_backend *chanb)
++{
++ struct channel *chan = container_of(chanb, struct channel, backend);
++ const struct lib_ring_buffer_config *config = &chanb->config;
++
++ /*
++ * Don't reset buf_size, subbuf_size, subbuf_size_order,
++ * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
++ * priv, notifiers, config, cpumask and name.
++ */
++ chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++/**
++ * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
++ * @nb: notifier block
++ * @action: hotplug action to take
++ * @hcpu: CPU number
++ *
++ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
++ */
++static
++int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
++ unsigned long action,
++ void *hcpu)
++{
++ unsigned int cpu = (unsigned long)hcpu;
++ struct channel_backend *chanb = container_of(nb, struct channel_backend,
++ cpu_hp_notifier);
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ struct lib_ring_buffer *buf;
++ int ret;
++
++ CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ case CPU_UP_PREPARE_FROZEN:
++ buf = per_cpu_ptr(chanb->buf, cpu);
++ ret = lib_ring_buffer_create(buf, chanb, cpu);
++ if (ret) {
++ printk(KERN_ERR
++ "ring_buffer_cpu_hp_callback: cpu %d "
++ "buffer creation failed\n", cpu);
++ return NOTIFY_BAD;
++ }
++ break;
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++		/*
++		 * No need to do a buffer switch here, because it will happen
++		 * when tracing is stopped, or will be done by switch timer CPU
++		 * DEAD callback.
++		 */
++ break;
++ }
++ return NOTIFY_OK;
++}
++#endif
++
++/**
++ * channel_backend_init - initialize a channel backend
++ * @chanb: channel backend
++ * @name: channel name
++ * @config: client ring buffer configuration
++ * @priv: client private data
++ * @subbuf_size: size of sub-buffers (at least PAGE_SIZE, power of 2)
++ * @num_subbuf: number of sub-buffers (power of 2)
++ *
++ * Returns 0 on success, a negative error code otherwise.
++ *
++ * Creates per-cpu channel buffers using the sizes and attributes
++ * specified. The created channel buffer files will be named
++ * name_0...name_N-1. File permissions will be %S_IRUSR.
++ *
++ * Called with CPU hotplug disabled.
++ */
++int channel_backend_init(struct channel_backend *chanb,
++ const char *name,
++ const struct lib_ring_buffer_config *config,
++ void *priv, size_t subbuf_size, size_t num_subbuf)
++{
++ struct channel *chan = container_of(chanb, struct channel, backend);
++ unsigned int i;
++ int ret;
++
++ if (!name)
++ return -EPERM;
++
++	/* Check that the subbuffer size is at least a page. */
++ if (subbuf_size < PAGE_SIZE)
++ return -EINVAL;
++
++ /*
++ * Make sure the number of subbuffers and subbuffer size are
++	 * Make sure the number of subbuffers and the subbuffer size are
++	 * powers of 2 and nonzero.
++ if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
++ return -EINVAL;
++ if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
++ return -EINVAL;
++
++ ret = subbuffer_id_check_index(config, num_subbuf);
++ if (ret)
++ return ret;
++
++ chanb->priv = priv;
++ chanb->buf_size = num_subbuf * subbuf_size;
++ chanb->subbuf_size = subbuf_size;
++ chanb->buf_size_order = get_count_order(chanb->buf_size);
++ chanb->subbuf_size_order = get_count_order(subbuf_size);
++ chanb->num_subbuf_order = get_count_order(num_subbuf);
++ chanb->extra_reader_sb =
++ (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
++ chanb->num_subbuf = num_subbuf;
++ strlcpy(chanb->name, name, NAME_MAX);
++ memcpy(&chanb->config, config, sizeof(chanb->config));
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
++ return -ENOMEM;
++ }
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ /* Allocating the buffer per-cpu structures */
++ chanb->buf = alloc_percpu(struct lib_ring_buffer);
++ if (!chanb->buf)
++ goto free_cpumask;
++
++ /*
++		 * When CPU hotplug support is not available, if the ring buffer
++		 * is allocated in an early initcall, it will not be notified of
++		 * secondary CPUs. In that case, we need to allocate for all
++		 * possible CPUs.
++ */
++#ifdef CONFIG_HOTPLUG_CPU
++ /*
++ * buf->backend.allocated test takes care of concurrent CPU
++ * hotplug.
++ * Priority higher than frontend, so we create the ring buffer
++ * before we start the timer.
++ */
++ chanb->cpu_hp_notifier.notifier_call =
++ lib_ring_buffer_cpu_hp_callback;
++ chanb->cpu_hp_notifier.priority = 5;
++ register_hotcpu_notifier(&chanb->cpu_hp_notifier);
++
++ get_online_cpus();
++ for_each_online_cpu(i) {
++ ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
++ chanb, i);
++ if (ret)
++ goto free_bufs; /* cpu hotplug locked */
++ }
++ put_online_cpus();
++#else
++ for_each_possible_cpu(i) {
++ ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
++ chanb, i);
++ if (ret)
++ goto free_bufs; /* cpu hotplug locked */
++ }
++#endif
++ } else {
++ chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
++ if (!chanb->buf)
++ goto free_cpumask;
++ ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
++ if (ret)
++ goto free_bufs;
++ }
++ chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
++
++ return 0;
++
++free_bufs:
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ for_each_possible_cpu(i) {
++ struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
++
++ if (!buf->backend.allocated)
++ continue;
++ lib_ring_buffer_free(buf);
++ }
++#ifdef CONFIG_HOTPLUG_CPU
++ put_online_cpus();
++#endif
++ free_percpu(chanb->buf);
++ } else
++ kfree(chanb->buf);
++free_cpumask:
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
++ free_cpumask_var(chanb->cpumask);
++ return -ENOMEM;
++}
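++
++/*
++ * Example call (illustrative only; "client_config", "client_priv" and the
++ * sizes are hypothetical client-side values):
++ *
++ *	ret = channel_backend_init(&chan->backend, "my_chan", &client_config,
++ *				   client_priv, 2 * PAGE_SIZE, 8);
++ *
++ * This would create a backend with eight sub-buffers of two pages each;
++ * both values must be powers of 2, and the sub-buffer size must be at
++ * least PAGE_SIZE.
++ */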
++
++/**
++ * channel_backend_unregister_notifiers - unregister notifiers
++ * @chanb: the channel backend
++ *
++ * Holds CPU hotplug.
++ */
++void channel_backend_unregister_notifiers(struct channel_backend *chanb)
++{
++ const struct lib_ring_buffer_config *config = &chanb->config;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
++ unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
++}
++
++/**
++ * channel_backend_free - destroy the channel backend
++ * @chanb: the channel backend
++ *
++ * Destroys all channel buffers and frees the channel backend resources.
++ */
++void channel_backend_free(struct channel_backend *chanb)
++{
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ unsigned int i;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ for_each_possible_cpu(i) {
++ struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
++
++ if (!buf->backend.allocated)
++ continue;
++ lib_ring_buffer_free(buf);
++ }
++ free_cpumask_var(chanb->cpumask);
++ free_percpu(chanb->buf);
++ } else {
++ struct lib_ring_buffer *buf = chanb->buf;
++
++ CHAN_WARN_ON(chanb, !buf->backend.allocated);
++ lib_ring_buffer_free(buf);
++ kfree(buf);
++ }
++}
++
++/**
++ * _lib_ring_buffer_write - write data to a ring_buffer buffer.
++ * @bufb : buffer backend
++ * @offset : offset within the buffer
++ * @src : source address
++ * @len : length to write
++ * @pagecpy : page size copied so far
++ */
++void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
++ const void *src, size_t len, ssize_t pagecpy)
++{
++ struct channel_backend *chanb = &bufb->chan->backend;
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ size_t sbidx, index;
++ struct lib_ring_buffer_backend_pages *rpages;
++ unsigned long sb_bindex, id;
++
++ do {
++ len -= pagecpy;
++ src += pagecpy;
++ offset += pagecpy;
++ sbidx = offset >> chanb->subbuf_size_order;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
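++		/*
++		 * Worked example (assuming PAGE_SIZE == 4096 and a 16 kB
++		 * sub-buffer): offset == 20480 yields sbidx == 1 (second
++		 * sub-buffer) and index == 1 (second page within it).
++		 */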
++
++ /*
++ * Underlying layer should never ask for writes across
++ * subbuffers.
++ */
++ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
++
++ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
++ id = bufb->buf_wsb[sbidx].id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ lib_ring_buffer_do_copy(config,
++ rpages->p[index].virt
++ + (offset & ~PAGE_MASK),
++ src, pagecpy);
++ } while (unlikely(len != pagecpy));
++}
++EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
++
++
++/**
++ * _lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
++ * @bufb : buffer backend
++ * @offset : offset within the buffer
++ * @c : the byte to write
++ * @len : length to write
++ * @pagecpy : page size copied so far
++ */
++void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
++ size_t offset,
++ int c, size_t len, ssize_t pagecpy)
++{
++ struct channel_backend *chanb = &bufb->chan->backend;
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ size_t sbidx, index;
++ struct lib_ring_buffer_backend_pages *rpages;
++ unsigned long sb_bindex, id;
++
++ do {
++ len -= pagecpy;
++ offset += pagecpy;
++ sbidx = offset >> chanb->subbuf_size_order;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++
++ /*
++ * Underlying layer should never ask for writes across
++ * subbuffers.
++ */
++ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
++
++ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
++ id = bufb->buf_wsb[sbidx].id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ lib_ring_buffer_do_memset(rpages->p[index].virt
++ + (offset & ~PAGE_MASK),
++ c, pagecpy);
++ } while (unlikely(len != pagecpy));
++}
++EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
++
++
++/**
++ * _lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
++ * @bufb : buffer backend
++ * @offset : offset within the buffer
++ * @src : source address
++ * @len : length to write
++ * @pagecpy : page size copied so far
++ *
++ * This function deals with userspace pointers; it should never be called
++ * directly without the src pointer having been checked with access_ok()
++ * beforehand.
++ */
++void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
++ size_t offset,
++ const void __user *src, size_t len,
++ ssize_t pagecpy)
++{
++ struct channel_backend *chanb = &bufb->chan->backend;
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ size_t sbidx, index;
++ struct lib_ring_buffer_backend_pages *rpages;
++ unsigned long sb_bindex, id;
++ int ret;
++
++ do {
++ len -= pagecpy;
++ src += pagecpy;
++ offset += pagecpy;
++ sbidx = offset >> chanb->subbuf_size_order;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++
++ /*
++ * Underlying layer should never ask for writes across
++ * subbuffers.
++ */
++ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
++
++ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
++ id = bufb->buf_wsb[sbidx].id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
++ + (offset & ~PAGE_MASK),
++ src, pagecpy) != 0;
++ if (ret > 0) {
++ offset += (pagecpy - ret);
++ len -= (pagecpy - ret);
++ _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
++ break; /* stop copy */
++ }
++ } while (unlikely(len != pagecpy));
++}
++EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
++
++/**
++ * lib_ring_buffer_read - read data from a ring_buffer buffer.
++ * @bufb : buffer backend
++ * @offset : offset within the buffer
++ * @dest : destination address
++ * @len : length to copy to destination
++ *
++ * Should be protected by get_subbuf/put_subbuf.
++ * Returns the length copied.
++ */
++size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
++ void *dest, size_t len)
++{
++ struct channel_backend *chanb = &bufb->chan->backend;
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ size_t index;
++ ssize_t pagecpy, orig_len;
++ struct lib_ring_buffer_backend_pages *rpages;
++ unsigned long sb_bindex, id;
++
++ orig_len = len;
++ offset &= chanb->buf_size - 1;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ if (unlikely(!len))
++ return 0;
++ for (;;) {
++ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
++ id = bufb->buf_rsb.id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
++ pagecpy);
++ len -= pagecpy;
++ if (likely(!len))
++ break;
++ dest += pagecpy;
++ offset += pagecpy;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ /*
++ * Underlying layer should never ask for reads across
++ * subbuffers.
++ */
++ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
++ }
++ return orig_len;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
++
++/**
++ * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
++ * @bufb : buffer backend
++ * @offset : offset within the buffer
++ * @dest : destination userspace address
++ * @len : length to copy to destination
++ *
++ * Should be protected by get_subbuf/put_subbuf.
++ * access_ok() must have been performed on dest addresses prior to calling
++ * this function.
++ * Returns -EFAULT on error, 0 if ok.
++ */
++int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
++ size_t offset, void __user *dest, size_t len)
++{
++ struct channel_backend *chanb = &bufb->chan->backend;
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ size_t index;
++ ssize_t pagecpy;
++ struct lib_ring_buffer_backend_pages *rpages;
++ unsigned long sb_bindex, id;
++
++ offset &= chanb->buf_size - 1;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ if (unlikely(!len))
++ return 0;
++ for (;;) {
++ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
++ id = bufb->buf_rsb.id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ if (__copy_to_user(dest,
++ rpages->p[index].virt + (offset & ~PAGE_MASK),
++ pagecpy))
++ return -EFAULT;
++ len -= pagecpy;
++ if (likely(!len))
++ break;
++ dest += pagecpy;
++ offset += pagecpy;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ /*
++ * Underlying layer should never ask for reads across
++ * subbuffers.
++ */
++ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
++
++/**
++ * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
++ * @bufb : buffer backend
++ * @offset : offset within the buffer
++ * @dest : destination address
++ * @len : destination's length
++ *
++ * Return string's length, or -EINVAL on error.
++ * Should be protected by get_subbuf/put_subbuf.
++ * Destination length should be at least 1 to hold '\0'.
++ */
++int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
++ void *dest, size_t len)
++{
++ struct channel_backend *chanb = &bufb->chan->backend;
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ size_t index;
++ ssize_t pagecpy, pagelen, strpagelen, orig_offset;
++ char *str;
++ struct lib_ring_buffer_backend_pages *rpages;
++ unsigned long sb_bindex, id;
++
++ offset &= chanb->buf_size - 1;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ orig_offset = offset;
++ if (unlikely(!len))
++ return -EINVAL;
++ for (;;) {
++ id = bufb->buf_rsb.id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
++ pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
++ strpagelen = strnlen(str, pagelen);
++ if (len) {
++ pagecpy = min_t(size_t, len, strpagelen);
++ if (dest) {
++ memcpy(dest, str, pagecpy);
++ dest += pagecpy;
++ }
++ len -= pagecpy;
++ }
++ offset += strpagelen;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ if (strpagelen < pagelen)
++ break;
++ /*
++ * Underlying layer should never ask for reads across
++ * subbuffers.
++ */
++ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
++ }
++ if (dest && len)
++ ((char *)dest)[0] = 0;
++ return offset - orig_offset;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
++
++/**
++ * lib_ring_buffer_read_get_page - Get a whole page to read from
++ * @bufb : buffer backend
++ * @offset : offset within the buffer
++ * @virt : pointer to page address (output)
++ *
++ * Should be protected by get_subbuf/put_subbuf.
++ * Returns the pointer to the page struct pointer.
++ */
++struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
++ size_t offset, void ***virt)
++{
++ size_t index;
++ struct lib_ring_buffer_backend_pages *rpages;
++ struct channel_backend *chanb = &bufb->chan->backend;
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ unsigned long sb_bindex, id;
++
++ offset &= chanb->buf_size - 1;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ id = bufb->buf_rsb.id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ *virt = &rpages->p[index].virt;
++ return &rpages->p[index].page;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
++
++/**
++ * lib_ring_buffer_read_offset_address - get address of a buffer location
++ * @bufb : buffer backend
++ * @offset : offset within the buffer.
++ *
++ * Return the address where a given offset is located (for read).
++ * Should be used to get the current subbuffer header pointer. Given we know
++ * it's never on a page boundary, it's safe to write directly to this address,
++ * as long as the write is never bigger than a page size.
++ */
++void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
++ size_t offset)
++{
++ size_t index;
++ struct lib_ring_buffer_backend_pages *rpages;
++ struct channel_backend *chanb = &bufb->chan->backend;
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ unsigned long sb_bindex, id;
++
++ offset &= chanb->buf_size - 1;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ id = bufb->buf_rsb.id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ return rpages->p[index].virt + (offset & ~PAGE_MASK);
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
++
++/**
++ * lib_ring_buffer_offset_address - get address of a location within the buffer
++ * @bufb : buffer backend
++ * @offset : offset within the buffer.
++ *
++ * Return the address where a given offset is located.
++ * Should be used to get the current subbuffer header pointer. Given we know
++ * it's always at the beginning of a page, it's safe to write directly to this
++ * address, as long as the write is never bigger than a page size.
++ */
++void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
++ size_t offset)
++{
++ size_t sbidx, index;
++ struct lib_ring_buffer_backend_pages *rpages;
++ struct channel_backend *chanb = &bufb->chan->backend;
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ unsigned long sb_bindex, id;
++
++ offset &= chanb->buf_size - 1;
++ sbidx = offset >> chanb->subbuf_size_order;
++ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
++ id = bufb->buf_wsb[sbidx].id;
++ sb_bindex = subbuffer_id_get_index(config, id);
++ rpages = bufb->array[sb_bindex];
++ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, id));
++ return rpages->p[index].virt + (offset & ~PAGE_MASK);
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
+@@ -0,0 +1,1830 @@
++/*
++ * ring_buffer_frontend.c
++ *
++ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ *
++ * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
++ * recorder (overwrite) modes. See thesis:
++ *
++ * Desnoyers, Mathieu (2009), "Low-Impact Operating System Tracing", Ph.D.
++ * dissertation, Ecole Polytechnique de Montreal.
++ * http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf
++ *
++ * - Algorithm presentation in Chapter 5:
++ * "Lockless Multi-Core High-Throughput Buffering".
++ * - Algorithm formal verification in Section 8.6:
++ * "Formal verification of LTTng"
++ *
++ * Author:
++ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * Inspired from LTT and RelayFS:
++ * Karim Yaghmour <karim@opersys.com>
++ * Tom Zanussi <zanussi@us.ibm.com>
++ * Bob Wisniewski <bob@watson.ibm.com>
++ * And from K42 :
++ * Bob Wisniewski <bob@watson.ibm.com>
++ *
++ * Buffer reader semantic :
++ *
++ * - get_subbuf_size
++ * while buffer is not finalized and empty
++ * - get_subbuf
++ * - if return value != 0, continue
++ * - splice one subbuffer worth of data to a pipe
++ * - splice the data from pipe to disk/network
++ * - put_subbuf
++ */
++
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++
++#include "../../wrapper/ringbuffer/config.h"
++#include "../../wrapper/ringbuffer/backend.h"
++#include "../../wrapper/ringbuffer/frontend.h"
++#include "../../wrapper/ringbuffer/iterator.h"
++#include "../../wrapper/ringbuffer/nohz.h"
++
++/*
++ * Internal structure representing offsets to use at a sub-buffer switch.
++ */
++struct switch_offsets {
++ unsigned long begin, end, old;
++ size_t pre_header_padding, size;
++ unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
++ switch_old_end:1;
++};
++
++#ifdef CONFIG_NO_HZ
++enum tick_nohz_val {
++ TICK_NOHZ_STOP,
++ TICK_NOHZ_FLUSH,
++ TICK_NOHZ_RESTART,
++};
++
++static ATOMIC_NOTIFIER_HEAD(tick_nohz_notifier);
++#endif /* CONFIG_NO_HZ */
++
++static DEFINE_PER_CPU(spinlock_t, ring_buffer_nohz_lock);
++
++DEFINE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
++EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
++
++static
++void lib_ring_buffer_print_errors(struct channel *chan,
++ struct lib_ring_buffer *buf, int cpu);
++
++/*
++ * Must be called under cpu hotplug protection.
++ */
++void lib_ring_buffer_free(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++
++ lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
++ kfree(buf->commit_hot);
++ kfree(buf->commit_cold);
++
++ lib_ring_buffer_backend_free(&buf->backend);
++}
++
++/**
++ * lib_ring_buffer_reset - Reset ring buffer to initial values.
++ * @buf: Ring buffer.
++ *
++ * Effectively empty the ring buffer. Should be called when the buffer is not
++ * used for writing. The ring buffer can be opened for reading, but the reader
++ * should not be using the iterator concurrently with reset. The previous
++ * current iterator record is reset.
++ */
++void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned int i;
++
++ /*
++ * Reset iterator first. It will put the subbuffer if it currently holds
++ * it.
++ */
++ lib_ring_buffer_iterator_reset(buf);
++ v_set(config, &buf->offset, 0);
++ for (i = 0; i < chan->backend.num_subbuf; i++) {
++ v_set(config, &buf->commit_hot[i].cc, 0);
++ v_set(config, &buf->commit_hot[i].seq, 0);
++ v_set(config, &buf->commit_cold[i].cc_sb, 0);
++ }
++ atomic_long_set(&buf->consumed, 0);
++ atomic_set(&buf->record_disabled, 0);
++ v_set(config, &buf->last_tsc, 0);
++ lib_ring_buffer_backend_reset(&buf->backend);
++ /* Don't reset number of active readers */
++ v_set(config, &buf->records_lost_full, 0);
++ v_set(config, &buf->records_lost_wrap, 0);
++ v_set(config, &buf->records_lost_big, 0);
++ v_set(config, &buf->records_count, 0);
++ v_set(config, &buf->records_overrun, 0);
++ buf->finalized = 0;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_reset);
++
++/**
++ * channel_reset - Reset channel to initial values.
++ * @chan: Channel.
++ *
++ * Effectively empty the channel. Should be called when the channel is not used
++ * for writing. The channel can be opened for reading, but the reader should not
++ * be using the iterator concurrently with reset. The previous current iterator
++ * record is reset.
++ */
++void channel_reset(struct channel *chan)
++{
++ /*
++ * Reset iterators first. Will put the subbuffer if held for reading.
++ */
++ channel_iterator_reset(chan);
++ atomic_set(&chan->record_disabled, 0);
++ /* Don't reset commit_count_mask, still valid */
++ channel_backend_reset(&chan->backend);
++ /* Don't reset switch/read timer interval */
++ /* Don't reset notifiers and notifier enable bits */
++ /* Don't reset reader reference count */
++}
++EXPORT_SYMBOL_GPL(channel_reset);
++
++/*
++ * Must be called under cpu hotplug protection.
++ */
++int lib_ring_buffer_create(struct lib_ring_buffer *buf,
++ struct channel_backend *chanb, int cpu)
++{
++ const struct lib_ring_buffer_config *config = &chanb->config;
++ struct channel *chan = container_of(chanb, struct channel, backend);
++ void *priv = chanb->priv;
++ size_t subbuf_header_size;
++ u64 tsc;
++ int ret;
++
++ /* Test for cpu hotplug */
++ if (buf->backend.allocated)
++ return 0;
++
++ /*
++ * Paranoia: per cpu dynamic allocation is not officially documented as
++ * zeroing the memory, so let's do it here too, just in case.
++ */
++ memset(buf, 0, sizeof(*buf));
++
++ ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend, cpu);
++ if (ret)
++ return ret;
++
++ buf->commit_hot =
++ kzalloc_node(ALIGN(sizeof(*buf->commit_hot)
++ * chan->backend.num_subbuf,
++ 1 << INTERNODE_CACHE_SHIFT),
++ GFP_KERNEL, cpu_to_node(max(cpu, 0)));
++ if (!buf->commit_hot) {
++ ret = -ENOMEM;
++ goto free_chanbuf;
++ }
++
++ buf->commit_cold =
++ kzalloc_node(ALIGN(sizeof(*buf->commit_cold)
++ * chan->backend.num_subbuf,
++ 1 << INTERNODE_CACHE_SHIFT),
++ GFP_KERNEL, cpu_to_node(max(cpu, 0)));
++ if (!buf->commit_cold) {
++ ret = -ENOMEM;
++ goto free_commit;
++ }
++
++ init_waitqueue_head(&buf->read_wait);
++ init_waitqueue_head(&buf->write_wait);
++ raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
++
++ /*
++ * Write the subbuffer header for first subbuffer so we know the total
++ * duration of data gathering.
++ */
++ subbuf_header_size = config->cb.subbuffer_header_size();
++ v_set(config, &buf->offset, subbuf_header_size);
++ subbuffer_id_clear_noref(config, &buf->backend.buf_wsb[0].id);
++ tsc = config->cb.ring_buffer_clock_read(buf->backend.chan);
++ config->cb.buffer_begin(buf, tsc, 0);
++ v_add(config, subbuf_header_size, &buf->commit_hot[0].cc);
++
++ if (config->cb.buffer_create) {
++ ret = config->cb.buffer_create(buf, priv, cpu, chanb->name);
++ if (ret)
++ goto free_init;
++ }
++
++ /*
++ * Ensure the buffer is ready before setting it to allocated and setting
++ * the cpumask.
++ * Used for cpu hotplug vs cpumask iteration.
++ */
++ smp_wmb();
++ buf->backend.allocated = 1;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ CHAN_WARN_ON(chan, cpumask_test_cpu(cpu,
++ chan->backend.cpumask));
++ cpumask_set_cpu(cpu, chan->backend.cpumask);
++ }
++
++ return 0;
++
++ /* Error handling */
++free_init:
++ kfree(buf->commit_cold);
++free_commit:
++ kfree(buf->commit_hot);
++free_chanbuf:
++ lib_ring_buffer_backend_free(&buf->backend);
++ return ret;
++}
++
++static void switch_buffer_timer(unsigned long data)
++{
++ struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ /*
++ * Only flush buffers periodically if readers are active.
++ */
++ if (atomic_long_read(&buf->active_readers))
++ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
++ mod_timer_pinned(&buf->switch_timer,
++ jiffies + chan->switch_timer_interval);
++ else
++ mod_timer(&buf->switch_timer,
++ jiffies + chan->switch_timer_interval);
++}
++
++/*
++ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
++ */
++static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ if (!chan->switch_timer_interval || buf->switch_timer_enabled)
++ return;
++ init_timer(&buf->switch_timer);
++ buf->switch_timer.function = switch_buffer_timer;
++ buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
++ buf->switch_timer.data = (unsigned long)buf;
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
++ add_timer_on(&buf->switch_timer, buf->backend.cpu);
++ else
++ add_timer(&buf->switch_timer);
++ buf->switch_timer_enabled = 1;
++}
++
++/*
++ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
++ */
++static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++
++ if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
++ return;
++
++ del_timer_sync(&buf->switch_timer);
++ buf->switch_timer_enabled = 0;
++}
++
++/*
++ * Polling timer to check the channels for data.
++ */
++static void read_buffer_timer(unsigned long data)
++{
++ struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ CHAN_WARN_ON(chan, !buf->backend.allocated);
++
++ if (atomic_long_read(&buf->active_readers)
++ && lib_ring_buffer_poll_deliver(config, buf, chan)) {
++ wake_up_interruptible(&buf->read_wait);
++ wake_up_interruptible(&chan->read_wait);
++ }
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
++ mod_timer_pinned(&buf->read_timer,
++ jiffies + chan->read_timer_interval);
++ else
++ mod_timer(&buf->read_timer,
++ jiffies + chan->read_timer_interval);
++}
++
++/*
++ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
++ */
++static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
++ || !chan->read_timer_interval
++ || buf->read_timer_enabled)
++ return;
++
++ init_timer(&buf->read_timer);
++ buf->read_timer.function = read_buffer_timer;
++ buf->read_timer.expires = jiffies + chan->read_timer_interval;
++ buf->read_timer.data = (unsigned long)buf;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
++ add_timer_on(&buf->read_timer, buf->backend.cpu);
++ else
++ add_timer(&buf->read_timer);
++ buf->read_timer_enabled = 1;
++}
++
++/*
++ * Called with ring_buffer_nohz_lock held for per-cpu buffers.
++ */
++static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
++ || !chan->read_timer_interval
++ || !buf->read_timer_enabled)
++ return;
++
++ del_timer_sync(&buf->read_timer);
++ /*
++ * do one more check to catch data that has been written in the last
++ * timer period.
++ */
++ if (lib_ring_buffer_poll_deliver(config, buf, chan)) {
++ wake_up_interruptible(&buf->read_wait);
++ wake_up_interruptible(&chan->read_wait);
++ }
++ buf->read_timer_enabled = 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++/**
++ * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
++ * @nb: notifier block
++ * @action: hotplug action to take
++ * @hcpu: CPU number
++ *
++ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
++ */
++static
++int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
++ unsigned long action,
++ void *hcpu)
++{
++ unsigned int cpu = (unsigned long)hcpu;
++ struct channel *chan = container_of(nb, struct channel,
++ cpu_hp_notifier);
++ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ if (!chan->cpu_hp_enable)
++ return NOTIFY_DONE;
++
++ CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
++
++ switch (action) {
++ case CPU_DOWN_FAILED:
++ case CPU_DOWN_FAILED_FROZEN:
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ wake_up_interruptible(&chan->hp_wait);
++ lib_ring_buffer_start_switch_timer(buf);
++ lib_ring_buffer_start_read_timer(buf);
++ return NOTIFY_OK;
++
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++ lib_ring_buffer_stop_switch_timer(buf);
++ lib_ring_buffer_stop_read_timer(buf);
++ return NOTIFY_OK;
++
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ /*
++ * Performing a buffer switch on a remote CPU. Performed by
++ * the CPU responsible for doing the hotunplug after the target
++ * CPU stopped running completely. Ensures that all data
++ * from that remote CPU is flushed.
++ */
++ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
++ return NOTIFY_OK;
++
++ default:
++ return NOTIFY_DONE;
++ }
++}
++#endif
++
++#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
++/*
++ * For per-cpu buffers, call the reader wakeups before switching the buffer, so
++ * that wake-up-tracing generated events are flushed before going idle (in
++ * tick_nohz). We test if the spinlock is locked to deal with the race where
++ * readers try to sample the ring buffer before we perform the switch. We let
++ * the readers retry in that case. If there is data in the buffer, the wake up
++ * is going to forbid the CPU running the reader thread from going idle.
++ */
++static int notrace ring_buffer_tick_nohz_callback(struct notifier_block *nb,
++ unsigned long val,
++ void *data)
++{
++ struct channel *chan = container_of(nb, struct channel,
++ tick_nohz_notifier);
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ struct lib_ring_buffer *buf;
++ int cpu = smp_processor_id();
++
++ if (config->alloc != RING_BUFFER_ALLOC_PER_CPU) {
++ /*
++ * We don't support keeping the system idle with global buffers
++ * and streaming active. In order to do so, we would need to
++ * sample a non-nohz-cpumask racelessly with the nohz updates
++ * without adding synchronization overhead to nohz. Leave this
++ * use-case out for now.
++ */
++ return 0;
++ }
++
++ buf = channel_get_ring_buffer(config, chan, cpu);
++ switch (val) {
++ case TICK_NOHZ_FLUSH:
++ raw_spin_lock(&buf->raw_tick_nohz_spinlock);
++ if (config->wakeup == RING_BUFFER_WAKEUP_BY_TIMER
++ && chan->read_timer_interval
++ && atomic_long_read(&buf->active_readers)
++ && (lib_ring_buffer_poll_deliver(config, buf, chan)
++ || lib_ring_buffer_pending_data(config, buf, chan))) {
++ wake_up_interruptible(&buf->read_wait);
++ wake_up_interruptible(&chan->read_wait);
++ }
++ if (chan->switch_timer_interval)
++ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
++ raw_spin_unlock(&buf->raw_tick_nohz_spinlock);
++ break;
++ case TICK_NOHZ_STOP:
++ spin_lock(&__get_cpu_var(ring_buffer_nohz_lock));
++ lib_ring_buffer_stop_switch_timer(buf);
++ lib_ring_buffer_stop_read_timer(buf);
++ spin_unlock(&__get_cpu_var(ring_buffer_nohz_lock));
++ break;
++ case TICK_NOHZ_RESTART:
++ spin_lock(&__get_cpu_var(ring_buffer_nohz_lock));
++ lib_ring_buffer_start_read_timer(buf);
++ lib_ring_buffer_start_switch_timer(buf);
++ spin_unlock(&__get_cpu_var(ring_buffer_nohz_lock));
++ break;
++ }
++
++ return 0;
++}
++
++void notrace lib_ring_buffer_tick_nohz_flush(void)
++{
++ atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_FLUSH,
++ NULL);
++}
++
++void notrace lib_ring_buffer_tick_nohz_stop(void)
++{
++ atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_STOP,
++ NULL);
++}
++
++void notrace lib_ring_buffer_tick_nohz_restart(void)
++{
++ atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_RESTART,
++ NULL);
++}
++#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
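++
++/*
++ * Illustrative note (assumption, not shown in this hunk): the three helpers
++ * above only fan out to the tick_nohz_notifier chain; they are intended to
++ * be called from the kernel idle/nohz path when the ring buffer is built
++ * into the kernel (CONFIG_LIB_RING_BUFFER), e.g.:
++ *
++ *      lib_ring_buffer_tick_nohz_flush();
++ *      lib_ring_buffer_tick_nohz_stop();
++ *      ... CPU enters nohz idle ...
++ *      lib_ring_buffer_tick_nohz_restart();
++ *
++ * The exact call sites are outside this file.
++ */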
++
++/*
++ * Holds CPU hotplug.
++ */
++static void channel_unregister_notifiers(struct channel *chan)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ int cpu;
++
++ channel_iterator_unregister_notifiers(chan);
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++#ifdef CONFIG_NO_HZ
++ /*
++ * Remove the nohz notifier first, so we are certain we stop
++ * the timers.
++ */
++ atomic_notifier_chain_unregister(&tick_nohz_notifier,
++ &chan->tick_nohz_notifier);
++ /*
++ * ring_buffer_nohz_lock will not be needed below, because
++ * we just removed the notifiers, which were the only source of
++ * concurrency.
++ */
++#endif /* CONFIG_NO_HZ */
++#ifdef CONFIG_HOTPLUG_CPU
++ get_online_cpus();
++ chan->cpu_hp_enable = 0;
++ for_each_online_cpu(cpu) {
++ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
++ cpu);
++ lib_ring_buffer_stop_switch_timer(buf);
++ lib_ring_buffer_stop_read_timer(buf);
++ }
++ put_online_cpus();
++ unregister_cpu_notifier(&chan->cpu_hp_notifier);
++#else
++ for_each_possible_cpu(cpu) {
++ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
++ cpu);
++ lib_ring_buffer_stop_switch_timer(buf);
++ lib_ring_buffer_stop_read_timer(buf);
++ }
++#endif
++ } else {
++ struct lib_ring_buffer *buf = chan->backend.buf;
++
++ lib_ring_buffer_stop_switch_timer(buf);
++ lib_ring_buffer_stop_read_timer(buf);
++ }
++ channel_backend_unregister_notifiers(&chan->backend);
++}
++
++static void channel_free(struct channel *chan)
++{
++ channel_iterator_free(chan);
++ channel_backend_free(&chan->backend);
++ kfree(chan);
++}
++
++/**
++ * channel_create - Create channel.
++ * @config: ring buffer instance configuration
++ * @name: name of the channel
++ * @priv: ring buffer client private data
++ * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
++ * address mapping. It is used only by RING_BUFFER_STATIC
++ * configuration. It can be set to NULL for other backends.
++ * @subbuf_size: subbuffer size
++ * @num_subbuf: number of subbuffers
++ * @switch_timer_interval: Time interval (in us) to fill sub-buffers with
++ * padding to let readers get those sub-buffers.
++ * Used for live streaming.
++ * @read_timer_interval: Time interval (in us) to wake up pending readers.
++ *
++ * Holds cpu hotplug.
++ * Returns NULL on failure.
++ */
++struct channel *channel_create(const struct lib_ring_buffer_config *config,
++ const char *name, void *priv, void *buf_addr,
++ size_t subbuf_size,
++ size_t num_subbuf, unsigned int switch_timer_interval,
++ unsigned int read_timer_interval)
++{
++ int ret, cpu;
++ struct channel *chan;
++
++ if (lib_ring_buffer_check_config(config, switch_timer_interval,
++ read_timer_interval))
++ return NULL;
++
++ chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
++ if (!chan)
++ return NULL;
++
++ ret = channel_backend_init(&chan->backend, name, config, priv,
++ subbuf_size, num_subbuf);
++ if (ret)
++ goto error;
++
++ ret = channel_iterator_init(chan);
++ if (ret)
++ goto error_free_backend;
++
++ chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
++ chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
++ chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
++ kref_init(&chan->ref);
++ init_waitqueue_head(&chan->read_wait);
++ init_waitqueue_head(&chan->hp_wait);
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
++ /* Only benefit from NO_HZ idle with per-cpu buffers for now. */
++ chan->tick_nohz_notifier.notifier_call =
++ ring_buffer_tick_nohz_callback;
++ chan->tick_nohz_notifier.priority = ~0U;
++ atomic_notifier_chain_register(&tick_nohz_notifier,
++ &chan->tick_nohz_notifier);
++#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
++
++ /*
++ * In case of non-hotplug cpu, if the ring-buffer is allocated
++ * in early initcall, it will not be notified of secondary cpus.
++ * In that case, we need to allocate for all possible cpus.
++ */
++#ifdef CONFIG_HOTPLUG_CPU
++ chan->cpu_hp_notifier.notifier_call =
++ lib_ring_buffer_cpu_hp_callback;
++ chan->cpu_hp_notifier.priority = 6;
++ register_cpu_notifier(&chan->cpu_hp_notifier);
++
++ get_online_cpus();
++ for_each_online_cpu(cpu) {
++ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
++ cpu);
++ spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
++ lib_ring_buffer_start_switch_timer(buf);
++ lib_ring_buffer_start_read_timer(buf);
++ spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
++ }
++ chan->cpu_hp_enable = 1;
++ put_online_cpus();
++#else
++ for_each_possible_cpu(cpu) {
++ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
++ cpu);
++ spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
++ lib_ring_buffer_start_switch_timer(buf);
++ lib_ring_buffer_start_read_timer(buf);
++ spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
++ }
++#endif
++ } else {
++ struct lib_ring_buffer *buf = chan->backend.buf;
++
++ lib_ring_buffer_start_switch_timer(buf);
++ lib_ring_buffer_start_read_timer(buf);
++ }
++
++ return chan;
++
++error_free_backend:
++ channel_backend_free(&chan->backend);
++error:
++ kfree(chan);
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(channel_create);
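++
++/*
++ * Illustrative usage sketch (not taken from this file): a ring buffer client
++ * typically creates a channel from its own static lib_ring_buffer_config:
++ *
++ *      chan = channel_create(&client_config, "chan-name", client_priv, NULL,
++ *                            subbuf_size, num_subbuf,
++ *                            switch_timer_interval, read_timer_interval);
++ *      if (!chan)
++ *              return -ENOMEM;
++ *
++ * "client_config", "client_priv" and the size/interval values are
++ * placeholders assumed to come from the client code, not from this file.
++ */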
++
++static
++void channel_release(struct kref *kref)
++{
++ struct channel *chan = container_of(kref, struct channel, ref);
++ channel_free(chan);
++}
++
++/**
++ * channel_destroy - Finalize, wait for q.s. and destroy channel.
++ * @chan: channel to destroy
++ *
++ * Holds cpu hotplug.
++ * Call "destroy" callback, finalize channels, and then decrement the
++ * channel reference count. Note that when readers have completed data
++ * consumption of finalized channels, get_subbuf() will return -ENODATA.
++ * They should release their handle at that point. Returns the private
++ * data pointer.
++ */
++void *channel_destroy(struct channel *chan)
++{
++ int cpu;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ void *priv;
++
++ channel_unregister_notifiers(chan);
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ /*
++ * No need to hold cpu hotplug, because all notifiers have been
++ * unregistered.
++ */
++ for_each_channel_cpu(cpu, chan) {
++ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
++ cpu);
++
++ if (config->cb.buffer_finalize)
++ config->cb.buffer_finalize(buf,
++ chan->backend.priv,
++ cpu);
++ if (buf->backend.allocated)
++ lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
++ /*
++ * Perform flush before writing to finalized.
++ */
++ smp_wmb();
++ ACCESS_ONCE(buf->finalized) = 1;
++ wake_up_interruptible(&buf->read_wait);
++ }
++ } else {
++ struct lib_ring_buffer *buf = chan->backend.buf;
++
++ if (config->cb.buffer_finalize)
++ config->cb.buffer_finalize(buf, chan->backend.priv, -1);
++ if (buf->backend.allocated)
++ lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
++ /*
++ * Perform flush before writing to finalized.
++ */
++ smp_wmb();
++ ACCESS_ONCE(buf->finalized) = 1;
++ wake_up_interruptible(&buf->read_wait);
++ }
++ ACCESS_ONCE(chan->finalized) = 1;
++ wake_up_interruptible(&chan->hp_wait);
++ wake_up_interruptible(&chan->read_wait);
++ priv = chan->backend.priv;
++ kref_put(&chan->ref, channel_release);
++ return priv;
++}
++EXPORT_SYMBOL_GPL(channel_destroy);
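++
++/*
++ * Illustrative teardown sketch: channel_destroy() returns the private data
++ * pointer passed to channel_create(), so a client typically ends with:
++ *
++ *      priv = channel_destroy(chan);
++ *      client_cleanup(priv);   // hypothetical client-side cleanup
++ *
++ * Readers still holding a reference keep the channel memory alive until
++ * they call lib_ring_buffer_release_read(), which drops the kref taken in
++ * lib_ring_buffer_open_read().
++ */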
++
++struct lib_ring_buffer *channel_get_ring_buffer(
++ const struct lib_ring_buffer_config *config,
++ struct channel *chan, int cpu)
++{
++ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
++ return chan->backend.buf;
++ else
++ return per_cpu_ptr(chan->backend.buf, cpu);
++}
++EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
++
++int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++
++ if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
++ return -EBUSY;
++ kref_get(&chan->ref);
++ smp_mb__after_atomic_inc();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
++
++void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++
++ CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
++ smp_mb__before_atomic_dec();
++ atomic_long_dec(&buf->active_readers);
++ kref_put(&chan->ref, channel_release);
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
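++
++/*
++ * Illustrative pairing (sketch): a single reader grabs the buffer before
++ * consuming and releases it when done; a second concurrent reader gets
++ * -EBUSY from the atomic_long_add_unless() above:
++ *
++ *      if (lib_ring_buffer_open_read(buf))
++ *              return -EBUSY;
++ *      ... consume data (snapshot/get_subbuf, see below) ...
++ *      lib_ring_buffer_release_read(buf);
++ */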
++
++/*
++ * Promote compiler barrier to a smp_mb().
++ * For the specific ring buffer case, this IPI call should be removed if the
++ * architecture does not reorder writes. This should eventually be provided by
++ * a separate architecture-specific infrastructure.
++ */
++static void remote_mb(void *info)
++{
++ smp_mb();
++}
++
++/**
++ * lib_ring_buffer_snapshot - save subbuffer position snapshot (for read)
++ * @buf: ring buffer
++ * @consumed: consumed count indicating the position where to read
++ * @produced: produced count, indicates the position where to stop reading
++ *
++ * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
++ * data to read at consumed position, or 0 if the get operation succeeds.
++ * Busy-loop trying to get data if the tick_nohz sequence lock is held.
++ */
++int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
++ unsigned long *consumed, unsigned long *produced)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long consumed_cur, write_offset;
++ int finalized;
++
++retry:
++ finalized = ACCESS_ONCE(buf->finalized);
++ /*
++ * Read finalized before counters.
++ */
++ smp_rmb();
++ consumed_cur = atomic_long_read(&buf->consumed);
++ /*
++ * No need to issue a memory barrier between consumed count read and
++ * write offset read, because consumed count can only change
++ * concurrently in overwrite mode, and we keep a sequence counter
++ * identifier derived from the write offset to check we are getting
++ * the same sub-buffer we are expecting (the sub-buffers are atomically
++ * "tagged" upon writes, tags are checked upon read).
++ */
++ write_offset = v_read(config, &buf->offset);
++
++ /*
++ * Check that we are not about to read the same subbuffer in
++ * which the writer head is.
++ */
++ if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
++ == 0)
++ goto nodata;
++
++ *consumed = consumed_cur;
++ *produced = subbuf_trunc(write_offset, chan);
++
++ return 0;
++
++nodata:
++ /*
++ * The memory barriers __wait_event()/wake_up_interruptible() take care
++ * of "raw_spin_is_locked" memory ordering.
++ */
++ if (finalized)
++ return -ENODATA;
++ else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
++ goto retry;
++ else
++ return -EAGAIN;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_snapshot);
++
++/**
++ * lib_ring_buffer_move_consumer - move consumed counter forward
++ * @buf: ring buffer
++ * @consumed_new: new consumed count value
++ *
++ * Should only be called from consumer context.
++ */
++void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
++ unsigned long consumed_new)
++{
++ struct lib_ring_buffer_backend *bufb = &buf->backend;
++ struct channel *chan = bufb->chan;
++ unsigned long consumed;
++
++ CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
++
++ /*
++ * Only push the consumed value forward.
++ * If the consumed cmpxchg fails, this is because we have been pushed by
++ * the writer in flight recorder mode.
++ */
++ consumed = atomic_long_read(&buf->consumed);
++ while ((long) consumed - (long) consumed_new < 0)
++ consumed = atomic_long_cmpxchg(&buf->consumed, consumed,
++ consumed_new);
++ /* Wake-up the metadata producer */
++ wake_up_interruptible(&buf->write_wait);
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
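++
++/*
++ * Illustrative consumer flow (sketch only): sample the produced/consumed
++ * positions, consume the sub-buffers in between, then push the consumed
++ * count forward:
++ *
++ *      err = lib_ring_buffer_snapshot(buf, &consumed, &produced);
++ *      if (err)
++ *              return err;     // -EAGAIN: no data, -ENODATA: finalized
++ *      while ((long) (produced - consumed) > 0) {
++ *              ... consume the sub-buffer at "consumed" (placeholder) ...
++ *              consumed = subbuf_align(consumed, chan);
++ *      }
++ *      lib_ring_buffer_move_consumer(buf, consumed);
++ *
++ * Actual data access goes through the backend read helpers, which are not
++ * part of this file.
++ */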
++
++/**
++ * lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
++ * @buf: ring buffer
++ * @consumed: consumed count indicating the position where to read
++ *
++ * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
++ * data to read at consumed position, or 0 if the get operation succeeds.
++ * Busy-loop trying to get data if the tick_nohz sequence lock is held.
++ */
++int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
++ unsigned long consumed)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
++ int ret;
++ int finalized;
++
++ if (buf->get_subbuf) {
++ /*
++ * Reader is trying to get a subbuffer twice.
++ */
++ CHAN_WARN_ON(chan, 1);
++ return -EBUSY;
++ }
++retry:
++ finalized = ACCESS_ONCE(buf->finalized);
++ /*
++ * Read finalized before counters.
++ */
++ smp_rmb();
++ consumed_cur = atomic_long_read(&buf->consumed);
++ consumed_idx = subbuf_index(consumed, chan);
++ commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
++ /*
++ * Make sure we read the commit count before reading the buffer
++ * data and the write offset. Correct consumed offset ordering
++ * wrt commit count is ensured by the use of cmpxchg to update
++ * the consumed offset.
++ * smp_call_function_single() can fail if the remote CPU is offline;
++ * this is OK because then there is no wmb to execute there.
++ * If our thread is executing on the same CPU as the one the buffer
++ * belongs to, we don't have to synchronize it at all. If we are
++ * migrated, the scheduler will take care of the memory barriers.
++ * Normally, smp_call_function_single() should ensure program order when
++ * executing the remote function, which implies that it surrounds the
++ * function execution with :
++ * smp_mb()
++ * send IPI
++ * csd_lock_wait
++ * recv IPI
++ * smp_mb()
++ * exec. function
++ * smp_mb()
++ * csd unlock
++ * smp_mb()
++ *
++ * However, smp_call_function_single() does not seem to clearly execute
++ * such barriers. It depends on spinlock semantics to provide the barrier
++ * before executing the IPI and, when busy-looping, csd_lock_wait only
++ * executes smp_mb() when it has to wait for the other CPU.
++ *
++ * I don't trust this code. Therefore, let's add the smp_mb() sequence
++ * required ourselves, even if duplicated. It has no performance impact
++ * anyway.
++ *
++ * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
++ * read and write vs write. They do not ensure core synchronization. We
++ * really have to ensure total order between the 3 barriers running on
++ * the 2 CPUs.
++ */
++ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
++ if (config->sync == RING_BUFFER_SYNC_PER_CPU
++ && config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ if (raw_smp_processor_id() != buf->backend.cpu) {
++ /* Total order with IPI handler smp_mb() */
++ smp_mb();
++ smp_call_function_single(buf->backend.cpu,
++ remote_mb, NULL, 1);
++ /* Total order with IPI handler smp_mb() */
++ smp_mb();
++ }
++ } else {
++ /* Total order with IPI handler smp_mb() */
++ smp_mb();
++ smp_call_function(remote_mb, NULL, 1);
++ /* Total order with IPI handler smp_mb() */
++ smp_mb();
++ }
++ } else {
++ /*
++ * Local rmb to match the remote wmb to read the commit count
++ * before the buffer data and the write offset.
++ */
++ smp_rmb();
++ }
++
++ write_offset = v_read(config, &buf->offset);
++
++ /*
++ * Check that the buffer we are getting is after or at consumed_cur
++ * position.
++ */
++ if ((long) subbuf_trunc(consumed, chan)
++ - (long) subbuf_trunc(consumed_cur, chan) < 0)
++ goto nodata;
++
++ /*
++ * Check that the subbuffer we are trying to consume has been
++ * already fully committed.
++ */
++ if (((commit_count - chan->backend.subbuf_size)
++ & chan->commit_count_mask)
++ - (buf_trunc(consumed, chan)
++ >> chan->backend.num_subbuf_order)
++ != 0)
++ goto nodata;
++
++ /*
++ * Check that we are not about to read the same subbuffer in
++ * which the writer head is.
++ */
++ if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed, chan)
++ == 0)
++ goto nodata;
++
++ /*
++ * Failure to get the subbuffer causes a busy-loop retry without going
++ * to a wait queue. These are caused by short-lived race windows where
++ * the writer is getting access to a subbuffer we were trying to get
++ * access to. Also checks that the "consumed" buffer count we are
++ * looking for matches the one contained in the subbuffer id.
++ */
++ ret = update_read_sb_index(config, &buf->backend, &chan->backend,
++ consumed_idx, buf_trunc_val(consumed, chan));
++ if (ret)
++ goto retry;
++ subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
++
++ buf->get_subbuf_consumed = consumed;
++ buf->get_subbuf = 1;
++
++ return 0;
++
++nodata:
++ /*
++ * The memory barriers __wait_event()/wake_up_interruptible() take care
++ * of "raw_spin_is_locked" memory ordering.
++ */
++ if (finalized)
++ return -ENODATA;
++ else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
++ goto retry;
++ else
++ return -EAGAIN;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_get_subbuf);
++
++/**
++ * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
++ * @buf: ring buffer
++ */
++void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
++{
++ struct lib_ring_buffer_backend *bufb = &buf->backend;
++ struct channel *chan = bufb->chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long read_sb_bindex, consumed_idx, consumed;
++
++ CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
++
++ if (!buf->get_subbuf) {
++ /*
++ * Reader puts a subbuffer it did not get.
++ */
++ CHAN_WARN_ON(chan, 1);
++ return;
++ }
++ consumed = buf->get_subbuf_consumed;
++ buf->get_subbuf = 0;
++
++ /*
++ * Clear the records_unread counter. (overruns counter)
++ * Can still be non-zero if a file reader simply grabbed the data
++ * without using iterators.
++ * Can be below zero if an iterator is used on a snapshot more than
++ * once.
++ */
++ read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
++ v_add(config, v_read(config,
++ &bufb->array[read_sb_bindex]->records_unread),
++ &bufb->records_read);
++ v_set(config, &bufb->array[read_sb_bindex]->records_unread, 0);
++ CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
++ && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
++ subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
++
++ /*
++ * Exchange the reader subbuffer with the one we put in its place in the
++ * writer subbuffer table. Expect the original consumed count. If
++ * update_read_sb_index fails, this is because the writer updated the
++ * subbuffer concurrently. We should therefore keep the subbuffer we
++ * currently have: it has become invalid to try reading this sub-buffer
++ * consumed count value anyway.
++ */
++ consumed_idx = subbuf_index(consumed, chan);
++ update_read_sb_index(config, &buf->backend, &chan->backend,
++ consumed_idx, buf_trunc_val(consumed, chan));
++ /*
++ * update_read_sb_index return value ignored. Don't exchange sub-buffer
++ * if the writer concurrently updated it.
++ */
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_put_subbuf);
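++
++/*
++ * Illustrative get/put pairing (sketch): exclusive reader access to one
++ * sub-buffer is bracketed by get_subbuf()/put_subbuf() on the same consumed
++ * count:
++ *
++ *      struct channel *chan = buf->backend.chan;
++ *
++ *      err = lib_ring_buffer_get_subbuf(buf, consumed);
++ *      if (!err) {
++ *              ... read the sub-buffer through the backend helpers ...
++ *              lib_ring_buffer_put_subbuf(buf);
++ *              lib_ring_buffer_move_consumer(buf,
++ *                              subbuf_align(consumed, chan));
++ *      }
++ *
++ * -EAGAIN means no data at that position yet, -ENODATA means the buffer is
++ * finalized; both are normal for a polling reader.
++ */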
++
++/*
++ * cons_offset is an iterator on all subbuffer offsets between the reader
++ * position and the writer position. (inclusive)
++ */
++static
++void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
++ struct channel *chan,
++ unsigned long cons_offset,
++ int cpu)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long cons_idx, commit_count, commit_count_sb;
++
++ cons_idx = subbuf_index(cons_offset, chan);
++ commit_count = v_read(config, &buf->commit_hot[cons_idx].cc);
++ commit_count_sb = v_read(config, &buf->commit_cold[cons_idx].cc_sb);
++
++ if (subbuf_offset(commit_count, chan) != 0)
++ printk(KERN_WARNING
++ "ring buffer %s, cpu %d: "
++ "commit count in subbuffer %lu,\n"
++ "expecting multiples of %lu bytes\n"
++ " [ %lu bytes committed, %lu bytes reader-visible ]\n",
++ chan->backend.name, cpu, cons_idx,
++ chan->backend.subbuf_size,
++ commit_count, commit_count_sb);
++
++ printk(KERN_DEBUG "ring buffer: %s, cpu %d: %lu bytes committed\n",
++ chan->backend.name, cpu, commit_count);
++}
++
++static
++void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
++ struct channel *chan,
++ void *priv, int cpu)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long write_offset, cons_offset;
++
++ /*
++ * No need to order commit_count, write_offset and cons_offset reads
++ * because we execute at teardown when no more writer nor reader
++ * references are left.
++ */
++ write_offset = v_read(config, &buf->offset);
++ cons_offset = atomic_long_read(&buf->consumed);
++ if (write_offset != cons_offset)
++ printk(KERN_DEBUG
++ "ring buffer %s, cpu %d: "
++ "non-consumed data\n"
++ " [ %lu bytes written, %lu bytes read ]\n",
++ chan->backend.name, cpu, write_offset, cons_offset);
++
++ for (cons_offset = atomic_long_read(&buf->consumed);
++ (long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
++ chan)
++ - cons_offset) > 0;
++ cons_offset = subbuf_align(cons_offset, chan))
++ lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
++ cpu);
++}
++
++static
++void lib_ring_buffer_print_errors(struct channel *chan,
++ struct lib_ring_buffer *buf, int cpu)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ void *priv = chan->backend.priv;
++
++ if (!strcmp(chan->backend.name, "relay-metadata")) {
++ printk(KERN_DEBUG "ring buffer %s: %lu records written, "
++ "%lu records overrun\n",
++ chan->backend.name,
++ v_read(config, &buf->records_count),
++ v_read(config, &buf->records_overrun));
++ } else {
++ printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
++ "%lu records overrun\n",
++ chan->backend.name, cpu,
++ v_read(config, &buf->records_count),
++ v_read(config, &buf->records_overrun));
++
++ if (v_read(config, &buf->records_lost_full)
++ || v_read(config, &buf->records_lost_wrap)
++ || v_read(config, &buf->records_lost_big))
++ printk(KERN_WARNING
++ "ring buffer %s, cpu %d: records were lost. Caused by:\n"
++ " [ %lu buffer full, %lu nest buffer wrap-around, "
++ "%lu event too big ]\n",
++ chan->backend.name, cpu,
++ v_read(config, &buf->records_lost_full),
++ v_read(config, &buf->records_lost_wrap),
++ v_read(config, &buf->records_lost_big));
++ }
++ lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
++}
++
++/*
++ * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
++ *
++ * Only executed when the buffer is finalized, in SWITCH_FLUSH.
++ */
++static
++void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
++ struct channel *chan,
++ struct switch_offsets *offsets,
++ u64 tsc)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long oldidx = subbuf_index(offsets->old, chan);
++ unsigned long commit_count;
++
++ config->cb.buffer_begin(buf, tsc, oldidx);
++
++ /*
++ * Order all writes to buffer before the commit count update that will
++ * determine that the subbuffer is full.
++ */
++ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
++ /*
++ * Must write slot data before incrementing commit count. This
++ * compiler barrier is upgraded into a smp_mb() by the IPI sent
++ * by get_subbuf().
++ */
++ barrier();
++ } else
++ smp_wmb();
++ v_add(config, config->cb.subbuffer_header_size(),
++ &buf->commit_hot[oldidx].cc);
++ commit_count = v_read(config, &buf->commit_hot[oldidx].cc);
++ /* Check if the written buffer has to be delivered */
++ lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
++ commit_count, oldidx, tsc);
++ lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
++ offsets->old, commit_count,
++ config->cb.subbuffer_header_size());
++}
++
++/*
++ * lib_ring_buffer_switch_old_end: switch old subbuffer
++ *
++ * Note : offset_old should never be 0 here. It is ok, because we never perform
++ * buffer switch on an empty subbuffer in SWITCH_ACTIVE mode. The caller
++ * increments the offset_old value when doing a SWITCH_FLUSH on an empty
++ * subbuffer.
++ */
++static
++void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
++ struct channel *chan,
++ struct switch_offsets *offsets,
++ u64 tsc)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
++ unsigned long commit_count, padding_size, data_size;
++
++ data_size = subbuf_offset(offsets->old - 1, chan) + 1;
++ padding_size = chan->backend.subbuf_size - data_size;
++ subbuffer_set_data_size(config, &buf->backend, oldidx, data_size);
++
++ /*
++ * Order all writes to buffer before the commit count update that will
++ * determine that the subbuffer is full.
++ */
++ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
++ /*
++ * Must write slot data before incrementing commit count. This
++ * compiler barrier is upgraded into a smp_mb() by the IPI sent
++ * by get_subbuf().
++ */
++ barrier();
++ } else
++ smp_wmb();
++ v_add(config, padding_size, &buf->commit_hot[oldidx].cc);
++ commit_count = v_read(config, &buf->commit_hot[oldidx].cc);
++ lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
++ commit_count, oldidx, tsc);
++ lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
++ offsets->old, commit_count,
++ padding_size);
++}
++
++/*
++ * lib_ring_buffer_switch_new_start: Populate new subbuffer.
++ *
++ * This code can be executed unordered : writers may already have written to the
++ * sub-buffer before this code gets executed, so caution is required. The commit
++ * makes sure that this code is executed before the delivery of this sub-buffer.
++ */
++static
++void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
++ struct channel *chan,
++ struct switch_offsets *offsets,
++ u64 tsc)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long beginidx = subbuf_index(offsets->begin, chan);
++ unsigned long commit_count;
++
++ config->cb.buffer_begin(buf, tsc, beginidx);
++
++ /*
++ * Order all writes to buffer before the commit count update that will
++ * determine that the subbuffer is full.
++ */
++ if (config->ipi == RING_BUFFER_IPI_BARRIER) {
++ /*
++ * Must write slot data before incrementing commit count. This
++ * compiler barrier is upgraded into a smp_mb() by the IPI sent
++ * by get_subbuf().
++ */
++ barrier();
++ } else
++ smp_wmb();
++ v_add(config, config->cb.subbuffer_header_size(),
++ &buf->commit_hot[beginidx].cc);
++ commit_count = v_read(config, &buf->commit_hot[beginidx].cc);
++ /* Check if the written buffer has to be delivered */
++ lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
++ commit_count, beginidx, tsc);
++ lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
++ offsets->begin, commit_count,
++ config->cb.subbuffer_header_size());
++}
++
++/*
++ * lib_ring_buffer_switch_new_end: finish switching current subbuffer
++ *
++ * Calls subbuffer_set_data_size() to set the data size of the current
++ * sub-buffer. We do not need to perform check_deliver nor commit here,
++ * since this task will be done by the "commit" of the event for which
++ * we are currently doing the space reservation.
++ */
++static
++void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
++ struct channel *chan,
++ struct switch_offsets *offsets,
++ u64 tsc)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long endidx, data_size;
++
++ endidx = subbuf_index(offsets->end - 1, chan);
++ data_size = subbuf_offset(offsets->end - 1, chan) + 1;
++ subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
++}
++
++/*
++ * Returns :
++ * 0 if ok
++ * !0 if execution must be aborted.
++ */
++static
++int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
++ struct lib_ring_buffer *buf,
++ struct channel *chan,
++ struct switch_offsets *offsets,
++ u64 *tsc)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long off, reserve_commit_diff;
++
++ offsets->begin = v_read(config, &buf->offset);
++ offsets->old = offsets->begin;
++ offsets->switch_old_start = 0;
++ off = subbuf_offset(offsets->begin, chan);
++
++ *tsc = config->cb.ring_buffer_clock_read(chan);
++
++ /*
++ * Ensure we flush the header of an empty subbuffer when doing the
++ * finalize (SWITCH_FLUSH). This ensures that we end up knowing the
++ * total data gathering duration even if there were no records saved
++ * after the last buffer switch.
++ * In SWITCH_ACTIVE mode, switch the buffer when it contains events.
++ * SWITCH_ACTIVE only flushes the current subbuffer, dealing with end of
++ * subbuffer header as appropriate.
++ * The next record that reserves space will be responsible for
++ * populating the following subbuffer header. We choose not to populate
++ * the next subbuffer header here because we want to be able to use
++ * SWITCH_ACTIVE for periodical buffer flush and CPU tick_nohz stop
++ * buffer flush, which must guarantee that all the buffer content
++ * (records and header timestamps) are visible to the reader. This is
++ * required for quiescence guarantees for the fusion merge.
++ */
++ if (mode != SWITCH_FLUSH && !off)
++ return -1; /* we do not have to switch : buffer is empty */
++
++ if (unlikely(off == 0)) {
++ unsigned long sb_index, commit_count;
++
++ /*
++ * We are performing a SWITCH_FLUSH. At this stage, there are no
++ * concurrent writes into the buffer.
++ *
++ * The client does not save any header information. Don't
++ * switch empty subbuffer on finalize, because it is invalid to
++ * deliver a completely empty subbuffer.
++ */
++ if (!config->cb.subbuffer_header_size())
++ return -1;
++
++ /* Test new buffer integrity */
++ sb_index = subbuf_index(offsets->begin, chan);
++ commit_count = v_read(config,
++ &buf->commit_cold[sb_index].cc_sb);
++ reserve_commit_diff =
++ (buf_trunc(offsets->begin, chan)
++ >> chan->backend.num_subbuf_order)
++ - (commit_count & chan->commit_count_mask);
++ if (likely(reserve_commit_diff == 0)) {
++ /* Next subbuffer not being written to. */
++ if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
++ subbuf_trunc(offsets->begin, chan)
++ - subbuf_trunc((unsigned long)
++ atomic_long_read(&buf->consumed), chan)
++ >= chan->backend.buf_size)) {
++ /*
++ * We do not overwrite non consumed buffers
++ * and we are full : don't switch.
++ */
++ return -1;
++ } else {
++ /*
++ * Next subbuffer not being written to, and we
++ * are either in overwrite mode or the buffer is
++ * not full. It's safe to write in this new
++ * subbuffer.
++ */
++ }
++ } else {
++ /*
++ * Next subbuffer reserve offset does not match the
++ * commit offset. Don't perform switch in
++ * producer-consumer and overwrite mode. Caused by
++ * either a writer OOPS or too many nested writes over a
++ * reserve/commit pair.
++ */
++ return -1;
++ }
++
++ /*
++ * Need to write the subbuffer start header on finalize.
++ */
++ offsets->switch_old_start = 1;
++ }
++ offsets->begin = subbuf_align(offsets->begin, chan);
++ /* Note: old points to the next subbuf at offset 0 */
++ offsets->end = offsets->begin;
++ return 0;
++}
++
++/*
++ * Force a sub-buffer switch. This operation is completely reentrant : can be
++ * called while tracing is active with absolutely no lock held.
++ *
++ * Note, however, that as a v_cmpxchg is used for some atomic
++ * operations, this function must be called from the CPU which owns the buffer
++ * for an ACTIVE flush.
++ */
++void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ struct switch_offsets offsets;
++ unsigned long oldidx;
++ u64 tsc;
++
++ offsets.size = 0;
++
++ /*
++ * Perform retryable operations.
++ */
++ do {
++ if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
++ &tsc))
++ return; /* Switch not needed */
++ } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
++ != offsets.old);
++
++ /*
++ * Atomically update last_tsc. This update races against concurrent
++ * atomic updates, but the race will always cause supplementary full TSC
++ * records, never the opposite (missing a full TSC record when it would
++ * be needed).
++ */
++ save_last_tsc(config, buf, tsc);
++
++ /*
++ * Push the reader if necessary
++ */
++ lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
++
++ oldidx = subbuf_index(offsets.old, chan);
++ lib_ring_buffer_clear_noref(config, &buf->backend, oldidx);
++
++ /*
++ * May need to populate header start on SWITCH_FLUSH.
++ */
++ if (offsets.switch_old_start) {
++ lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc);
++ offsets.old += config->cb.subbuffer_header_size();
++ }
++
++ /*
++ * Switch old subbuffer.
++ */
++ lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc);
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
++
++static void remote_switch(void *info)
++{
++ struct lib_ring_buffer *buf = info;
++
++ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
++}
++
++void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ int ret;
++
++ /*
++ * With global synchronization we don't need to use the IPI scheme.
++ */
++ if (config->sync == RING_BUFFER_SYNC_GLOBAL) {
++ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
++ return;
++ }
++
++ /*
++ * Taking lock on CPU hotplug to ensure two things: first, that the
++ * target cpu is not taken concurrently offline while we are within
++ * smp_call_function_single() (I don't trust that get_cpu() on the
++ * _local_ CPU actually inhibits CPU hotplug for the _remote_ CPU (to be
++ * confirmed)). Secondly, if it happens that the CPU is not online, our
++ * own call to lib_ring_buffer_switch_slow() needs to be protected from
++ * CPU hotplug handlers, which can also perform a remote subbuffer
++ * switch.
++ */
++ get_online_cpus();
++ ret = smp_call_function_single(buf->backend.cpu,
++ remote_switch, buf, 1);
++ if (ret) {
++ /* Remote CPU is offline, do it ourself. */
++ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
++ }
++ put_online_cpus();
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote);
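++
++/*
++ * Illustrative usage note (sketch, not a call site from this patch): to
++ * force a flush of a per-cpu buffer from an arbitrary CPU, prefer the
++ * remote variant; the slow-path ACTIVE switch is only safe from the CPU
++ * owning the buffer (or with RING_BUFFER_SYNC_GLOBAL):
++ *
++ *      lib_ring_buffer_switch_remote(buf);              // any CPU
++ *      lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE); // owning CPU only
++ */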
++
++/*
++ * Returns :
++ * 0 if ok
++ * -ENOSPC if event size is too large for packet.
++ * -ENOBUFS if there is currently not enough space in buffer for the event.
++ * -EIO if data cannot be written into the buffer for any other reason.
++ */
++static
++int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
++ struct channel *chan,
++ struct switch_offsets *offsets,
++ struct lib_ring_buffer_ctx *ctx)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long reserve_commit_diff, offset_cmp;
++
++retry:
++ offsets->begin = offset_cmp = v_read(config, &buf->offset);
++ offsets->old = offsets->begin;
++ offsets->switch_new_start = 0;
++ offsets->switch_new_end = 0;
++ offsets->switch_old_end = 0;
++ offsets->pre_header_padding = 0;
++
++ ctx->tsc = config->cb.ring_buffer_clock_read(chan);
++ if ((int64_t) ctx->tsc == -EIO)
++ return -EIO;
++
++ if (last_tsc_overflow(config, buf, ctx->tsc))
++ ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
++
++ if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
++ offsets->switch_new_start = 1; /* For offsets->begin */
++ } else {
++ offsets->size = config->cb.record_header_size(config, chan,
++ offsets->begin,
++ &offsets->pre_header_padding,
++ ctx);
++ offsets->size +=
++ lib_ring_buffer_align(offsets->begin + offsets->size,
++ ctx->largest_align)
++ + ctx->data_size;
++ if (unlikely(subbuf_offset(offsets->begin, chan) +
++ offsets->size > chan->backend.subbuf_size)) {
++ offsets->switch_old_end = 1; /* For offsets->old */
++ offsets->switch_new_start = 1; /* For offsets->begin */
++ }
++ }
++ if (unlikely(offsets->switch_new_start)) {
++ unsigned long sb_index, commit_count;
++
++ /*
++ * We are typically not filling the previous buffer completely.
++ */
++ if (likely(offsets->switch_old_end))
++ offsets->begin = subbuf_align(offsets->begin, chan);
++ offsets->begin = offsets->begin
++ + config->cb.subbuffer_header_size();
++ /* Test new buffer integrity */
++ sb_index = subbuf_index(offsets->begin, chan);
++ /*
++ * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
++ * lib_ring_buffer_check_deliver() has the matching
++ * memory barriers required around commit_cold cc_sb
++ * updates to ensure reserve and commit counter updates
++ * are not seen reordered when updated by another CPU.
++ */
++ smp_rmb();
++ commit_count = v_read(config,
++ &buf->commit_cold[sb_index].cc_sb);
++ /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
++ smp_rmb();
++ if (unlikely(offset_cmp != v_read(config, &buf->offset))) {
++ /*
++ * The reserve counter has been concurrently updated
++ * while we read the commit counter. This means the
++ * commit counter we read might not match buf->offset
++ * due to concurrent update. We therefore need to retry.
++ */
++ goto retry;
++ }
++ reserve_commit_diff =
++ (buf_trunc(offsets->begin, chan)
++ >> chan->backend.num_subbuf_order)
++ - (commit_count & chan->commit_count_mask);
++ if (likely(reserve_commit_diff == 0)) {
++ /* Next subbuffer not being written to. */
++ if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
++ subbuf_trunc(offsets->begin, chan)
++ - subbuf_trunc((unsigned long)
++ atomic_long_read(&buf->consumed), chan)
++ >= chan->backend.buf_size)) {
++ /*
++ * We do not overwrite non consumed buffers
++ * and we are full : record is lost.
++ */
++ v_inc(config, &buf->records_lost_full);
++ return -ENOBUFS;
++ } else {
++ /*
++ * Next subbuffer not being written to, and we
++ * are either in overwrite mode or the buffer is
++ * not full. It's safe to write in this new
++ * subbuffer.
++ */
++ }
++ } else {
++ /*
++ * Next subbuffer reserve offset does not match the
++ * commit offset, and this did not involve an update to the
++ * reserve counter. Drop record in producer-consumer and
++ * overwrite mode. Caused by either a writer OOPS or
++ * too many nested writes over a reserve/commit pair.
++ */
++ v_inc(config, &buf->records_lost_wrap);
++ return -EIO;
++ }
++ offsets->size =
++ config->cb.record_header_size(config, chan,
++ offsets->begin,
++ &offsets->pre_header_padding,
++ ctx);
++ offsets->size +=
++ lib_ring_buffer_align(offsets->begin + offsets->size,
++ ctx->largest_align)
++ + ctx->data_size;
++ if (unlikely(subbuf_offset(offsets->begin, chan)
++ + offsets->size > chan->backend.subbuf_size)) {
++ /*
++ * Record too big for subbuffers, report error, don't
++ * complete the sub-buffer switch.
++ */
++ v_inc(config, &buf->records_lost_big);
++ return -ENOSPC;
++ } else {
++ /*
++ * We just made a successful buffer switch and the
++ * record fits in the new subbuffer. Let's write.
++ */
++ }
++ } else {
++ /*
++ * Record fits in the current buffer and we are not on a switch
++ * boundary. It's safe to write.
++ */
++ }
++ offsets->end = offsets->begin + offsets->size;
++
++ if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
++ /*
++ * The offset_end will fall at the very beginning of the next
++ * subbuffer.
++ */
++ offsets->switch_new_end = 1; /* For offsets->begin */
++ }
++ return 0;
++}
++
++/**
++ * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
++ * @ctx: ring buffer context.
++ *
++ * Return : -ENOBUFS if not enough space, -ENOSPC if event size too large,
++ * -EIO for other errors, else returns 0.
++ * It will take care of sub-buffer switching.
++ */
++int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
++{
++ struct channel *chan = ctx->chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ struct lib_ring_buffer *buf;
++ struct switch_offsets offsets;
++ int ret;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
++ buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
++ else
++ buf = chan->backend.buf;
++ ctx->buf = buf;
++
++ offsets.size = 0;
++
++ do {
++ ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
++ ctx);
++ if (unlikely(ret))
++ return ret;
++ } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
++ offsets.end)
++ != offsets.old));
++
++ /*
++ * Atomically update last_tsc. This update races against concurrent
++ * atomic updates, but the race will always cause supplementary full TSC
++ * records, never the opposite (missing a full TSC record when it would
++ * be needed).
++ */
++ save_last_tsc(config, buf, ctx->tsc);
++
++ /*
++ * Push the reader if necessary
++ */
++ lib_ring_buffer_reserve_push_reader(buf, chan, offsets.end - 1);
++
++ /*
++ * Clear noref flag for this subbuffer.
++ */
++ lib_ring_buffer_clear_noref(config, &buf->backend,
++ subbuf_index(offsets.end - 1, chan));
++
++ /*
++ * Switch old subbuffer if needed.
++ */
++ if (unlikely(offsets.switch_old_end)) {
++ lib_ring_buffer_clear_noref(config, &buf->backend,
++ subbuf_index(offsets.old - 1, chan));
++ lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
++ }
++
++ /*
++ * Populate new subbuffer.
++ */
++ if (unlikely(offsets.switch_new_start))
++ lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
++
++ if (unlikely(offsets.switch_new_end))
++ lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
++
++ ctx->slot_size = offsets.size;
++ ctx->pre_offset = offsets.begin;
++ ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
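++
++/*
++ * Illustrative error handling (sketch): the reservation fast path lives in
++ * the frontend API header (assumed, not shown here) and falls back to
++ * lib_ring_buffer_reserve_slow(), typically mapping its return codes as:
++ *
++ *      ret = lib_ring_buffer_reserve_slow(ctx);
++ *      switch (ret) {
++ *      case 0:                 // slot reserved, write at ctx->buf_offset
++ *              break;
++ *      case -ENOBUFS:          // buffer full (records_lost_full)
++ *      case -ENOSPC:           // record too big (records_lost_big)
++ *      case -EIO:              // clock error or reserve/commit mismatch
++ *      default:
++ *              return ret;
++ *      }
++ *
++ * This only restates the documented return codes of the function above.
++ */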
++
++int __init init_lib_ring_buffer_frontend(void)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu)
++ spin_lock_init(&per_cpu(ring_buffer_nohz_lock, cpu));
++ return 0;
++}
++
++module_init(init_lib_ring_buffer_frontend);
++
++void __exit exit_lib_ring_buffer_frontend(void)
++{
++}
++
++module_exit(exit_lib_ring_buffer_frontend);
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c
+@@ -0,0 +1,810 @@
++/*
++ * ring_buffer_iterator.c
++ *
++ * Ring buffer and channel iterators. Get each event of a channel in order. Uses
++ * a prio heap for per-cpu buffers, giving an O(log(NR_CPUS)) algorithmic
++ * complexity for the "get next event" operation.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Author:
++ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ */
++
++#include "../../wrapper/ringbuffer/iterator.h"
++#include <linux/jiffies.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++
++/*
++ * Safety factor taking into account internal kernel interrupt latency.
++ * Assuming 250ms worst-case latency.
++ */
++#define MAX_SYSTEM_LATENCY 250
++
++/*
++ * Maximum delta expected between trace clocks. At most 1 jiffy delta.
++ */
++#define MAX_CLOCK_DELTA (jiffies_to_usecs(1) * 1000)
++
++/**
++ * lib_ring_buffer_get_next_record - Get the next record in a buffer.
++ * @chan: channel
++ * @buf: buffer
++ *
++ * Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
++ * buffer is empty and finalized. The buffer must already be opened for reading.
++ */
++ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
++ struct lib_ring_buffer *buf)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ struct lib_ring_buffer_iter *iter = &buf->iter;
++ int ret;
++
++restart:
++ switch (iter->state) {
++ case ITER_GET_SUBBUF:
++ ret = lib_ring_buffer_get_next_subbuf(buf);
++ if (ret && !ACCESS_ONCE(buf->finalized)
++ && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
++ /*
++ * Use "pull" scheme for global buffers. The reader
++ * itself flushes the buffer to "pull" data not visible
++ * to readers yet. Flush current subbuffer and re-try.
++ *
++ * Per-CPU buffers rather use a "push" scheme because
++ * the IPI needed to flush all CPU's buffers is too
++ * costly. In the "push" scheme, the reader waits for
++ * the writer periodic deferrable timer to flush the
++ * buffers (keeping track of a quiescent state
++ * timestamp). Therefore, the writer "pushes" data out
++ * of the buffers rather than letting the reader "pull"
++ * data from the buffer.
++ */
++ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
++ ret = lib_ring_buffer_get_next_subbuf(buf);
++ }
++ if (ret)
++ return ret;
++ iter->consumed = buf->cons_snapshot;
++ iter->data_size = lib_ring_buffer_get_read_data_size(config, buf);
++ iter->read_offset = iter->consumed;
++ /* skip header */
++ iter->read_offset += config->cb.subbuffer_header_size();
++ iter->state = ITER_TEST_RECORD;
++ goto restart;
++ case ITER_TEST_RECORD:
++ if (iter->read_offset - iter->consumed >= iter->data_size) {
++ iter->state = ITER_PUT_SUBBUF;
++ } else {
++ CHAN_WARN_ON(chan, !config->cb.record_get);
++ config->cb.record_get(config, chan, buf,
++ iter->read_offset,
++ &iter->header_len,
++ &iter->payload_len,
++ &iter->timestamp);
++ iter->read_offset += iter->header_len;
++ subbuffer_consume_record(config, &buf->backend);
++ iter->state = ITER_NEXT_RECORD;
++ return iter->payload_len;
++ }
++ goto restart;
++ case ITER_NEXT_RECORD:
++ iter->read_offset += iter->payload_len;
++ iter->state = ITER_TEST_RECORD;
++ goto restart;
++ case ITER_PUT_SUBBUF:
++ lib_ring_buffer_put_next_subbuf(buf);
++ iter->state = ITER_GET_SUBBUF;
++ goto restart;
++ default:
++ CHAN_WARN_ON(chan, 1); /* Should not happen */
++ return -EPERM;
++ }
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
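++
++/*
++ * Illustrative iteration loop (sketch): reading every record of one buffer
++ * until it is finalized:
++ *
++ *      for (;;) {
++ *              ssize_t len = lib_ring_buffer_get_next_record(chan, buf);
++ *
++ *              if (len == -ENODATA)
++ *                      break;          // finalized and empty
++ *              if (len == -EAGAIN)
++ *                      continue;       // temporarily empty, poll or sleep
++ *              ... copy "len" payload bytes at buf->iter.read_offset ...
++ *      }
++ *
++ * The payload copy is a placeholder; it would use the backend read helpers,
++ * which are outside this file.
++ */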
++
++static int buf_is_higher(void *a, void *b)
++{
++ struct lib_ring_buffer *bufa = a;
++ struct lib_ring_buffer *bufb = b;
++
++ /* Consider lowest timestamps to be at the top of the heap */
++ return (bufa->iter.timestamp < bufb->iter.timestamp);
++}
++
++static
++void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
++ struct channel *chan)
++{
++ struct lttng_ptr_heap *heap = &chan->iter.heap;
++ struct lib_ring_buffer *buf, *tmp;
++ ssize_t len;
++
++ list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
++ iter.empty_node) {
++ len = lib_ring_buffer_get_next_record(chan, buf);
++
++ /*
++ * Deal with -EAGAIN and -ENODATA.
++ * len >= 0 means record contains data.
++ * -EBUSY should never happen, because we support only one
++ * reader.
++ */
++ switch (len) {
++ case -EAGAIN:
++ /* Keep node in empty list */
++ break;
++ case -ENODATA:
++ /*
++ * Buffer is finalized. Don't add to list of empty
++ * buffer, because it has no more data to provide, ever.
++ */
++ list_del(&buf->iter.empty_node);
++ break;
++ case -EBUSY:
++ CHAN_WARN_ON(chan, 1);
++ break;
++ default:
++ /*
++ * Insert buffer into the heap, remove from empty buffer
++ * list.
++ */
++ CHAN_WARN_ON(chan, len < 0);
++ list_del(&buf->iter.empty_node);
++ CHAN_WARN_ON(chan, lttng_heap_insert(heap, buf));
++ }
++ }
++}
++
++static
++void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
++ struct channel *chan)
++{
++ u64 timestamp_qs;
++ unsigned long wait_msecs;
++
++ /*
++ * No need to wait if no empty buffers are present.
++ */
++ if (list_empty(&chan->iter.empty_head))
++ return;
++
++ timestamp_qs = config->cb.ring_buffer_clock_read(chan);
++ /*
++ * We need to consider previously empty buffers.
++ * Do a get next buf record on each of them. Add them to
++ * the heap if they have data. If at least one of them
++ * doesn't have data, we need to wait for
++ * switch_timer_interval + MAX_SYSTEM_LATENCY (so we are sure the
++ * buffers have been switched either by the timer or idle entry) and
++ * check them again, adding them if they have data.
++ */
++ lib_ring_buffer_get_empty_buf_records(config, chan);
++
++ /*
++ * No need to wait if no empty buffers are present.
++ */
++ if (list_empty(&chan->iter.empty_head))
++ return;
++
++ /*
++ * We need to wait for the buffer switch timer to run. If the
++ * CPU is idle, idle entry performed the switch.
++ * TODO: we could optimize further by skipping the sleep if all
++ * empty buffers belong to idle or offline cpus.
++ */
++ wait_msecs = jiffies_to_msecs(chan->switch_timer_interval);
++ wait_msecs += MAX_SYSTEM_LATENCY;
++ msleep(wait_msecs);
++ lib_ring_buffer_get_empty_buf_records(config, chan);
++ /*
++ * Any buffer still in the empty list here cannot possibly
++ * contain an event with a timestamp prior to "timestamp_qs".
++ * The new quiescent state timestamp is the one we grabbed
++ * before waiting for buffer data. It is therefore safe to
++ * ignore empty buffers up to last_qs timestamp for fusion
++ * merge.
++ */
++ chan->iter.last_qs = timestamp_qs;
++}
++
++/**
++ * channel_get_next_record - Get the next record in a channel.
++ * @chan: channel
++ * @ret_buf: the buffer in which the event is located (output)
++ *
++ * Returns the size of new current event, -EAGAIN if all buffers are empty,
++ * -ENODATA if all buffers are empty and finalized. The channel must already be
++ * opened for reading.
++ */
++ssize_t channel_get_next_record(struct channel *chan,
++ struct lib_ring_buffer **ret_buf)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ struct lib_ring_buffer *buf;
++ struct lttng_ptr_heap *heap;
++ ssize_t len;
++
++ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
++ *ret_buf = channel_get_ring_buffer(config, chan, 0);
++ return lib_ring_buffer_get_next_record(chan, *ret_buf);
++ }
++
++ heap = &chan->iter.heap;
++
++ /*
++ * get next record for topmost buffer.
++ */
++ buf = lttng_heap_maximum(heap);
++ if (buf) {
++ len = lib_ring_buffer_get_next_record(chan, buf);
++ /*
++ * Deal with -EAGAIN and -ENODATA.
++ * len >= 0 means record contains data.
++ */
++ switch (len) {
++ case -EAGAIN:
++ buf->iter.timestamp = 0;
++ list_add(&buf->iter.empty_node, &chan->iter.empty_head);
++ /* Remove topmost buffer from the heap */
++ CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
++ break;
++ case -ENODATA:
++ /*
++ * Buffer is finalized. Remove buffer from heap and
++ * don't add to list of empty buffer, because it has no
++ * more data to provide, ever.
++ */
++ CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
++ break;
++ case -EBUSY:
++ CHAN_WARN_ON(chan, 1);
++ break;
++ default:
++ /*
++ * Reinsert buffer into the heap. Note that heap can be
++ * partially empty, so we need to use
++ * lttng_heap_replace_max().
++ */
++ CHAN_WARN_ON(chan, len < 0);
++ CHAN_WARN_ON(chan, lttng_heap_replace_max(heap, buf) != buf);
++ break;
++ }
++ }
++
++ buf = lttng_heap_maximum(heap);
++ if (!buf || buf->iter.timestamp > chan->iter.last_qs) {
++ /*
++ * Deal with buffers previously showing no data.
++ * Add buffers containing data to the heap, update
++ * last_qs.
++ */
++ lib_ring_buffer_wait_for_qs(config, chan);
++ }
++
++ *ret_buf = buf = lttng_heap_maximum(heap);
++ if (buf) {
++ /*
++ * If this warning triggers, you probably need to check your
++ * system interrupt latency. Typical causes: too much printk()
++ * output going to a serial console with interrupts off.
++ * Allow for MAX_CLOCK_DELTA ns timestamp delta going backward.
++ * Observed on SMP KVM setups with trace_clock().
++ */
++ if (chan->iter.last_timestamp
++ > (buf->iter.timestamp + MAX_CLOCK_DELTA)) {
++ printk(KERN_WARNING "ring_buffer: timestamps going "
++ "backward. Last time %llu ns, cpu %d, "
++ "current time %llu ns, cpu %d, "
++ "delta %llu ns.\n",
++ chan->iter.last_timestamp, chan->iter.last_cpu,
++ buf->iter.timestamp, buf->backend.cpu,
++ chan->iter.last_timestamp - buf->iter.timestamp);
++ CHAN_WARN_ON(chan, 1);
++ }
++ chan->iter.last_timestamp = buf->iter.timestamp;
++ chan->iter.last_cpu = buf->backend.cpu;
++ return buf->iter.payload_len;
++ } else {
++ /* Heap is empty */
++ if (list_empty(&chan->iter.empty_head))
++ return -ENODATA; /* All buffers finalized */
++ else
++ return -EAGAIN; /* Temporarily empty */
++ }
++}
++EXPORT_SYMBOL_GPL(channel_get_next_record);
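++
++/*
++ * Illustrative channel-level iteration (sketch): for per-cpu channels the
++ * heap merge above returns records in approximate timestamp order across
++ * CPUs, so a reader only needs to loop on the channel:
++ *
++ *      struct lib_ring_buffer *buf;
++ *      ssize_t len;
++ *
++ *      while ((len = channel_get_next_record(chan, &buf)) != -ENODATA) {
++ *              if (len == -EAGAIN)
++ *                      continue;       // or wait for more data
++ *              ... consume len bytes from buf at buf->iter.read_offset ...
++ *      }
++ *
++ * This is a usage sketch; a real consumer (e.g. the iterator file
++ * operations) is assumed, not shown here.
++ */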
++
++static
++void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
++{
++ if (buf->iter.allocated)
++ return;
++
++ buf->iter.allocated = 1;
++ if (chan->iter.read_open && !buf->iter.read_open) {
++ CHAN_WARN_ON(chan, lib_ring_buffer_open_read(buf) != 0);
++ buf->iter.read_open = 1;
++ }
++
++ /* Add to list of buffers without any current record */
++ if (chan->backend.config.alloc == RING_BUFFER_ALLOC_PER_CPU)
++ list_add(&buf->iter.empty_node, &chan->iter.empty_head);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static
++int channel_iterator_cpu_hotplug(struct notifier_block *nb,
++ unsigned long action,
++ void *hcpu)
++{
++ unsigned int cpu = (unsigned long)hcpu;
++ struct channel *chan = container_of(nb, struct channel,
++ hp_iter_notifier);
++ struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ if (!chan->hp_iter_enable)
++ return NOTIFY_DONE;
++
++ CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
++
++ switch (action) {
++ case CPU_DOWN_FAILED:
++ case CPU_DOWN_FAILED_FROZEN:
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ lib_ring_buffer_iterator_init(chan, buf);
++ return NOTIFY_OK;
++ default:
++ return NOTIFY_DONE;
++ }
++}
++#endif
++
++int channel_iterator_init(struct channel *chan)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ struct lib_ring_buffer *buf;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ int cpu, ret;
++
++ INIT_LIST_HEAD(&chan->iter.empty_head);
++ ret = lttng_heap_init(&chan->iter.heap,
++ num_possible_cpus(),
++ GFP_KERNEL, buf_is_higher);
++ if (ret)
++ return ret;
++ /*
++ * Without CPU hotplug support, a ring buffer allocated from an early
++ * initcall will never be notified of secondary CPUs coming online.
++ * In that case, allocate for all possible CPUs.
++ */
++#ifdef CONFIG_HOTPLUG_CPU
++ chan->hp_iter_notifier.notifier_call =
++ channel_iterator_cpu_hotplug;
++ chan->hp_iter_notifier.priority = 10;
++ register_cpu_notifier(&chan->hp_iter_notifier);
++ get_online_cpus();
++ for_each_online_cpu(cpu) {
++ buf = per_cpu_ptr(chan->backend.buf, cpu);
++ lib_ring_buffer_iterator_init(chan, buf);
++ }
++ chan->hp_iter_enable = 1;
++ put_online_cpus();
++#else
++ for_each_possible_cpu(cpu) {
++ buf = per_cpu_ptr(chan->backend.buf, cpu);
++ lib_ring_buffer_iterator_init(chan, buf);
++ }
++#endif
++ } else {
++ buf = channel_get_ring_buffer(config, chan, 0);
++ lib_ring_buffer_iterator_init(chan, buf);
++ }
++ return 0;
++}
++
++void channel_iterator_unregister_notifiers(struct channel *chan)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ chan->hp_iter_enable = 0;
++ unregister_cpu_notifier(&chan->hp_iter_notifier);
++ }
++}
++
++void channel_iterator_free(struct channel *chan)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
++ lttng_heap_free(&chan->iter.heap);
++}
++
++int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
++ return lib_ring_buffer_open_read(buf);
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_open);
++
++/*
++ * Note: Iterators must not be mixed with other types of outputs, because an
++ * iterator can leave the buffer in "GET" state, which is not consistent with
++ * other types of output (mmap, splice, raw data read).
++ */
++void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
++{
++ lib_ring_buffer_release_read(buf);
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
++
++int channel_iterator_open(struct channel *chan)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ struct lib_ring_buffer *buf;
++ int ret = 0, cpu;
++
++ CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ get_online_cpus();
++ /* Allow CPU hotplug to keep track of opened reader */
++ chan->iter.read_open = 1;
++ for_each_channel_cpu(cpu, chan) {
++ buf = channel_get_ring_buffer(config, chan, cpu);
++ ret = lib_ring_buffer_iterator_open(buf);
++ if (ret)
++ goto error;
++ buf->iter.read_open = 1;
++ }
++ put_online_cpus();
++ } else {
++ buf = channel_get_ring_buffer(config, chan, 0);
++ ret = lib_ring_buffer_iterator_open(buf);
++ }
++ return ret;
++error:
++ /* Error should always happen on CPU 0, hence no close is required. */
++ CHAN_WARN_ON(chan, cpu != 0);
++ put_online_cpus();
++ return ret;
++}
++EXPORT_SYMBOL_GPL(channel_iterator_open);
++
++void channel_iterator_release(struct channel *chan)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ struct lib_ring_buffer *buf;
++ int cpu;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
++ get_online_cpus();
++ for_each_channel_cpu(cpu, chan) {
++ buf = channel_get_ring_buffer(config, chan, cpu);
++ if (buf->iter.read_open) {
++ lib_ring_buffer_iterator_release(buf);
++ buf->iter.read_open = 0;
++ }
++ }
++ chan->iter.read_open = 0;
++ put_online_cpus();
++ } else {
++ buf = channel_get_ring_buffer(config, chan, 0);
++ lib_ring_buffer_iterator_release(buf);
++ }
++}
++EXPORT_SYMBOL_GPL(channel_iterator_release);
++
++void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++
++ if (buf->iter.state != ITER_GET_SUBBUF)
++ lib_ring_buffer_put_next_subbuf(buf);
++ buf->iter.state = ITER_GET_SUBBUF;
++ /* Remove from heap (if present). */
++ if (lttng_heap_cherrypick(&chan->iter.heap, buf))
++ list_add(&buf->iter.empty_node, &chan->iter.empty_head);
++ buf->iter.timestamp = 0;
++ buf->iter.header_len = 0;
++ buf->iter.payload_len = 0;
++ buf->iter.consumed = 0;
++ buf->iter.read_offset = 0;
++ buf->iter.data_size = 0;
++ /* Don't reset allocated and read_open */
++}
++
++void channel_iterator_reset(struct channel *chan)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ struct lib_ring_buffer *buf;
++ int cpu;
++
++ /* Empty heap, put into empty_head */
++ while ((buf = lttng_heap_remove(&chan->iter.heap)) != NULL)
++ list_add(&buf->iter.empty_node, &chan->iter.empty_head);
++
++ for_each_channel_cpu(cpu, chan) {
++ buf = channel_get_ring_buffer(config, chan, cpu);
++ lib_ring_buffer_iterator_reset(buf);
++ }
++ /* Don't reset read_open */
++ chan->iter.last_qs = 0;
++ chan->iter.last_timestamp = 0;
++ chan->iter.last_cpu = 0;
++ chan->iter.len_left = 0;
++}
++
++/*
++ * Ring buffer payload extraction read() implementation.
++ */
++static
++ssize_t channel_ring_buffer_file_read(struct file *filp,
++ char __user *user_buf,
++ size_t count,
++ loff_t *ppos,
++ struct channel *chan,
++ struct lib_ring_buffer *buf,
++ int fusionmerge)
++{
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ size_t read_count = 0, read_offset;
++ ssize_t len;
++
++ might_sleep();
++ if (!access_ok(VERIFY_WRITE, user_buf, count))
++ return -EFAULT;
++
++ /* Finish copy of previous record */
++ if (*ppos != 0) {
++ if (read_count < count) {
++ len = chan->iter.len_left;
++ read_offset = *ppos;
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU
++ && fusionmerge)
++ buf = lttng_heap_maximum(&chan->iter.heap);
++ CHAN_WARN_ON(chan, !buf);
++ goto skip_get_next;
++ }
++ }
++
++ while (read_count < count) {
++ size_t copy_len, space_left;
++
++ if (fusionmerge)
++ len = channel_get_next_record(chan, &buf);
++ else
++ len = lib_ring_buffer_get_next_record(chan, buf);
++len_test:
++ if (len < 0) {
++ /*
++ * Check if buffer is finalized (end of file).
++ */
++ if (len == -ENODATA) {
++ /* A 0 read_count will tell about end of file */
++ goto nodata;
++ }
++ if (filp->f_flags & O_NONBLOCK) {
++ if (!read_count)
++ read_count = -EAGAIN;
++ goto nodata;
++ } else {
++ int error;
++
++ /*
++ * No data available at the moment, return what
++ * we got.
++ */
++ if (read_count)
++ goto nodata;
++
++ /*
++ * Wait for returned len to be >= 0 or -ENODATA.
++ */
++ if (fusionmerge)
++ error = wait_event_interruptible(
++ chan->read_wait,
++ ((len = channel_get_next_record(chan,
++ &buf)), len != -EAGAIN));
++ else
++ error = wait_event_interruptible(
++ buf->read_wait,
++ ((len = lib_ring_buffer_get_next_record(
++ chan, buf)), len != -EAGAIN));
++ CHAN_WARN_ON(chan, len == -EBUSY);
++ if (error) {
++ read_count = error;
++ goto nodata;
++ }
++ CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
++ goto len_test;
++ }
++ }
++ read_offset = buf->iter.read_offset;
++skip_get_next:
++ space_left = count - read_count;
++ if (len <= space_left) {
++ copy_len = len;
++ chan->iter.len_left = 0;
++ *ppos = 0;
++ } else {
++ copy_len = space_left;
++ chan->iter.len_left = len - copy_len;
++ *ppos = read_offset + copy_len;
++ }
++ if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
++ &user_buf[read_count],
++ copy_len)) {
++ /*
++ * Leave the len_left and ppos values at their current
++ * state, as we currently have a valid event to read.
++ */
++ return -EFAULT;
++ }
++ read_count += copy_len;
++ };
++ return read_count;
++
++nodata:
++ *ppos = 0;
++ chan->iter.len_left = 0;
++ return read_count;
++}
++
++/**
++ * lib_ring_buffer_file_read - Read buffer record payload.
++ * @filp: file structure pointer.
++ * @user_buf: user buffer to read data into.
++ * @count: number of bytes to read.
++ * @ppos: file read position.
++ *
++ * Returns a negative value on error, or the number of bytes read on success.
++ * ppos is used to save the position _within the current record_ between calls
++ * to read().
++ */
++static
++ssize_t lib_ring_buffer_file_read(struct file *filp,
++ char __user *user_buf,
++ size_t count,
++ loff_t *ppos)
++{
++ struct inode *inode = filp->f_dentry->d_inode;
++ struct lib_ring_buffer *buf = inode->i_private;
++ struct channel *chan = buf->backend.chan;
++
++ return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
++ chan, buf, 0);
++}
++
++/**
++ * channel_file_read - Read channel record payload.
++ * @filp: file structure pointer.
++ * @user_buf: user buffer to read data into.
++ * @count: number of bytes to read.
++ * @ppos: file read position.
++ *
++ * Returns a negative value on error, or the number of bytes read on success.
++ * ppos is used to save the position _within the current record_ between calls
++ * to read().
++ */
++static
++ssize_t channel_file_read(struct file *filp,
++ char __user *user_buf,
++ size_t count,
++ loff_t *ppos)
++{
++ struct inode *inode = filp->f_dentry->d_inode;
++ struct channel *chan = inode->i_private;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
++ return channel_ring_buffer_file_read(filp, user_buf, count,
++ ppos, chan, NULL, 1);
++ else {
++ struct lib_ring_buffer *buf =
++ channel_get_ring_buffer(config, chan, 0);
++ return channel_ring_buffer_file_read(filp, user_buf, count,
++ ppos, chan, buf, 0);
++ }
++}
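The consumer side of these payload file operations is a plain read() loop. A minimal, hedged userspace sketch follows; the file path is a placeholder, since the location at which the debugfs/procfs ABI exposes the channel payload file is not part of this hunk:

    /* Hedged sketch: drain record payloads from a channel payload file. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Placeholder path; the real one depends on the debugfs/procfs ABI. */
            int fd = open("/path/to/lttng/channel-payload", O_RDONLY);
            char buf[4096];
            ssize_t ret;

            if (fd < 0)
                    return 1;
            /* read() returns 0 only once all buffers are finalized (end of trace). */
            while ((ret = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, ret, stdout);
            close(fd);
            return 0;
    }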
++
++static
++int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
++{
++ struct lib_ring_buffer *buf = inode->i_private;
++ int ret;
++
++ ret = lib_ring_buffer_iterator_open(buf);
++ if (ret)
++ return ret;
++
++ file->private_data = buf;
++ ret = nonseekable_open(inode, file);
++ if (ret)
++ goto release_iter;
++ return 0;
++
++release_iter:
++ lib_ring_buffer_iterator_release(buf);
++ return ret;
++}
++
++static
++int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
++{
++ struct lib_ring_buffer *buf = inode->i_private;
++
++ lib_ring_buffer_iterator_release(buf);
++ return 0;
++}
++
++static
++int channel_file_open(struct inode *inode, struct file *file)
++{
++ struct channel *chan = inode->i_private;
++ int ret;
++
++ ret = channel_iterator_open(chan);
++ if (ret)
++ return ret;
++
++ file->private_data = chan;
++ ret = nonseekable_open(inode, file);
++ if (ret)
++ goto release_iter;
++ return 0;
++
++release_iter:
++ channel_iterator_release(chan);
++ return ret;
++}
++
++static
++int channel_file_release(struct inode *inode, struct file *file)
++{
++ struct channel *chan = inode->i_private;
++
++ channel_iterator_release(chan);
++ return 0;
++}
++
++const struct file_operations channel_payload_file_operations = {
++ .owner = THIS_MODULE,
++ .open = channel_file_open,
++ .release = channel_file_release,
++ .read = channel_file_read,
++ .llseek = vfs_lib_ring_buffer_no_llseek,
++};
++EXPORT_SYMBOL_GPL(channel_payload_file_operations);
++
++const struct file_operations lib_ring_buffer_payload_file_operations = {
++ .owner = THIS_MODULE,
++ .open = lib_ring_buffer_file_open,
++ .release = lib_ring_buffer_file_release,
++ .read = lib_ring_buffer_file_read,
++ .llseek = vfs_lib_ring_buffer_no_llseek,
++};
++EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
+@@ -0,0 +1,128 @@
++/*
++ * ring_buffer_mmap.c
++ *
++ * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
++ * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
++ * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; only version 2 of the License.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Re-using code from kernel/relay.c, hence the GPLv2 license for this
++ * file.
++ */
++
++#include <linux/module.h>
++#include <linux/mm.h>
++
++#include "../../wrapper/ringbuffer/backend.h"
++#include "../../wrapper/ringbuffer/frontend.h"
++#include "../../wrapper/ringbuffer/vfs.h"
++
++/*
++ * fault() vm_op implementation for ring buffer file mapping.
++ */
++static int lib_ring_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ struct lib_ring_buffer *buf = vma->vm_private_data;
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ pgoff_t pgoff = vmf->pgoff;
++ struct page **page;
++ void **virt;
++ unsigned long offset, sb_bindex;
++
++ /*
++ * Verify that faults are only done on the range of pages owned by the
++ * reader.
++ */
++ offset = pgoff << PAGE_SHIFT;
++ sb_bindex = subbuffer_id_get_index(config, buf->backend.buf_rsb.id);
++ if (!(offset >= buf->backend.array[sb_bindex]->mmap_offset
++ && offset < buf->backend.array[sb_bindex]->mmap_offset +
++ buf->backend.chan->backend.subbuf_size))
++ return VM_FAULT_SIGBUS;
++ /*
++ * ring_buffer_read_get_page() gets the page in the current reader's
++ * pages.
++ */
++ page = lib_ring_buffer_read_get_page(&buf->backend, offset, &virt);
++ if (!*page)
++ return VM_FAULT_SIGBUS;
++ get_page(*page);
++ vmf->page = *page;
++
++ return 0;
++}
++
++/*
++ * vm_ops for ring buffer file mappings.
++ */
++static const struct vm_operations_struct lib_ring_buffer_mmap_ops = {
++ .fault = lib_ring_buffer_fault,
++};
++
++/**
++ * lib_ring_buffer_mmap_buf: - mmap channel buffer to process address space
++ * @buf: ring buffer to map
++ * @vma: vm_area_struct describing memory to be mapped
++ *
++ * Returns 0 if ok, negative on error
++ *
++ * Caller should already have grabbed mmap_sem.
++ */
++static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer *buf,
++ struct vm_area_struct *vma)
++{
++ unsigned long length = vma->vm_end - vma->vm_start;
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned long mmap_buf_len;
++
++ if (config->output != RING_BUFFER_MMAP)
++ return -EINVAL;
++
++ mmap_buf_len = chan->backend.buf_size;
++ if (chan->backend.extra_reader_sb)
++ mmap_buf_len += chan->backend.subbuf_size;
++
++ if (length != mmap_buf_len)
++ return -EINVAL;
++
++ vma->vm_ops = &lib_ring_buffer_mmap_ops;
++ vma->vm_flags |= VM_DONTEXPAND;
++ vma->vm_private_data = buf;
++
++ return 0;
++}
++
++int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
++ struct lib_ring_buffer *buf)
++{
++ return lib_ring_buffer_mmap_buf(buf, vma);
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_mmap);
++
++/**
++ * vfs_lib_ring_buffer_mmap - mmap file op
++ * @filp: the file
++ * @vma: the vma describing what to map
++ *
++ * Calls upon lib_ring_buffer_mmap_buf() to map the file into user space.
++ */
++int vfs_lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ struct lib_ring_buffer *buf = filp->private_data;
++ return lib_ring_buffer_mmap(filp, vma, buf);
++}
++EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_mmap);
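For context, a hedged userspace sketch of how a consumer could drive this mmap output mode with the RING_BUFFER_* ioctls declared in vfs.h; error handling is omitted and the include stands in for an assumed userspace copy of the ioctl definitions:

    /* Hedged sketch: consume sub-buffers through the mmap interface. */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include "ringbuffer/vfs.h"     /* assumed userspace copy of the ioctl numbers */

    static void consume_mmap(int stream_fd)
    {
            unsigned long map_len, read_off, data_size;
            void *map;

            ioctl(stream_fd, RING_BUFFER_GET_MMAP_LEN, &map_len);
            map = mmap(NULL, map_len, PROT_READ, MAP_PRIVATE, stream_fd, 0);

            /* Take the next readable sub-buffer, copy it out, then release it. */
            while (!ioctl(stream_fd, RING_BUFFER_GET_NEXT_SUBBUF)) {
                    ioctl(stream_fd, RING_BUFFER_GET_MMAP_READ_OFFSET, &read_off);
                    ioctl(stream_fd, RING_BUFFER_GET_SUBBUF_SIZE, &data_size);
                    fwrite((char *)map + read_off, 1, data_size, stdout);
                    ioctl(stream_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
            }
            munmap(map, map_len);
    }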
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c
+@@ -0,0 +1,227 @@
++/*
++ * ring_buffer_splice.c
++ *
++ * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
++ * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
++ * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Re-using code from kernel/relay.c, which is why it is licensed under
++ * the GPLv2.
++ */
++
++#include <linux/module.h>
++#include <linux/fs.h>
++
++#include "../../wrapper/splice.h"
++#include "../../wrapper/ringbuffer/backend.h"
++#include "../../wrapper/ringbuffer/frontend.h"
++#include "../../wrapper/ringbuffer/vfs.h"
++
++#if 0
++#define printk_dbg(fmt, args...) printk(fmt, args)
++#else
++#define printk_dbg(fmt, args...)
++#endif
++
++loff_t vfs_lib_ring_buffer_no_llseek(struct file *file, loff_t offset,
++ int origin)
++{
++ return -ESPIPE;
++}
++EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_no_llseek);
++
++/*
++ * Release pages from the buffer so splice pipe_to_file can move them.
++ * Called after the pipe has been populated with buffer pages.
++ */
++static void lib_ring_buffer_pipe_buf_release(struct pipe_inode_info *pipe,
++ struct pipe_buffer *pbuf)
++{
++ __free_page(pbuf->page);
++}
++
++static const struct pipe_buf_operations ring_buffer_pipe_buf_ops = {
++ .can_merge = 0,
++ .map = generic_pipe_buf_map,
++ .unmap = generic_pipe_buf_unmap,
++ .confirm = generic_pipe_buf_confirm,
++ .release = lib_ring_buffer_pipe_buf_release,
++ .steal = generic_pipe_buf_steal,
++ .get = generic_pipe_buf_get,
++};
++
++/*
++ * Page release operation after splice pipe_to_file ends.
++ */
++static void lib_ring_buffer_page_release(struct splice_pipe_desc *spd,
++ unsigned int i)
++{
++ __free_page(spd->pages[i]);
++}
++
++/*
++ * subbuf_splice_actor - splice up to one subbuf's worth of data
++ */
++static int subbuf_splice_actor(struct file *in,
++ loff_t *ppos,
++ struct pipe_inode_info *pipe,
++ size_t len,
++ unsigned int flags,
++ struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ unsigned int poff, subbuf_pages, nr_pages;
++ struct page *pages[PIPE_DEF_BUFFERS];
++ struct partial_page partial[PIPE_DEF_BUFFERS];
++ struct splice_pipe_desc spd = {
++ .pages = pages,
++ .nr_pages = 0,
++ .partial = partial,
++ .flags = flags,
++ .ops = &ring_buffer_pipe_buf_ops,
++ .spd_release = lib_ring_buffer_page_release,
++ };
++ unsigned long consumed_old, roffset;
++ unsigned long bytes_avail;
++
++ /*
++ * Check that a GET_SUBBUF ioctl has been done before.
++ */
++ WARN_ON(atomic_long_read(&buf->active_readers) != 1);
++ consumed_old = lib_ring_buffer_get_consumed(config, buf);
++ consumed_old += *ppos;
++
++ /*
++ * Adjust read len, if longer than what is available.
++ * Max read size is 1 subbuffer due to get_subbuf/put_subbuf for
++ * protection.
++ */
++ bytes_avail = chan->backend.subbuf_size;
++ WARN_ON(bytes_avail > chan->backend.buf_size);
++ len = min_t(size_t, len, bytes_avail);
++ subbuf_pages = bytes_avail >> PAGE_SHIFT;
++ nr_pages = min_t(unsigned int, subbuf_pages, PIPE_DEF_BUFFERS);
++ roffset = consumed_old & PAGE_MASK;
++ poff = consumed_old & ~PAGE_MASK;
++ printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
++ len, (ssize_t)*ppos, lib_ring_buffer_get_offset(config, buf));
++
++ for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
++ unsigned int this_len;
++ struct page **page, *new_page;
++ void **virt;
++
++ if (!len)
++ break;
++ printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
++ len, roffset);
++
++ /*
++ * We have to replace the page we are moving into the splice
++ * pipe.
++ */
++ new_page = alloc_pages_node(cpu_to_node(max(buf->backend.cpu,
++ 0)),
++ GFP_KERNEL | __GFP_ZERO, 0);
++ if (!new_page)
++ break;
++
++ this_len = PAGE_SIZE - poff;
++ page = lib_ring_buffer_read_get_page(&buf->backend, roffset, &virt);
++ spd.pages[spd.nr_pages] = *page;
++ *page = new_page;
++ *virt = page_address(new_page);
++ spd.partial[spd.nr_pages].offset = poff;
++ spd.partial[spd.nr_pages].len = this_len;
++
++ poff = 0;
++ roffset += PAGE_SIZE;
++ len -= this_len;
++ }
++
++ if (!spd.nr_pages)
++ return 0;
++
++ return wrapper_splice_to_pipe(pipe, &spd);
++}
++
++ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
++ struct pipe_inode_info *pipe, size_t len,
++ unsigned int flags,
++ struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ ssize_t spliced;
++ int ret;
++
++ if (config->output != RING_BUFFER_SPLICE)
++ return -EINVAL;
++
++ /*
++ * We require ppos and length to be page-aligned for performance reasons
++ * (no page copy). Size is known using the ioctl
++ * RING_BUFFER_GET_PADDED_SUBBUF_SIZE, which is page-size padded.
++ * We fail when the ppos or len passed is not page-aligned, because splice
++ * is not allowed to copy more than the length passed as parameter (so
++ * the ABI does not let us silently copy more than requested to include
++ * padding).
++ */
++ if (*ppos != PAGE_ALIGN(*ppos) || len != PAGE_ALIGN(len))
++ return -EINVAL;
++
++ ret = 0;
++ spliced = 0;
++
++ printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n", len,
++ (ssize_t)*ppos);
++ while (len && !spliced) {
++ ret = subbuf_splice_actor(in, ppos, pipe, len, flags, buf);
++ printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
++ if (ret < 0)
++ break;
++ else if (!ret) {
++ if (flags & SPLICE_F_NONBLOCK)
++ ret = -EAGAIN;
++ break;
++ }
++
++ *ppos += ret;
++ if (ret > len)
++ len = 0;
++ else
++ len -= ret;
++ spliced += ret;
++ }
++
++ if (spliced)
++ return spliced;
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read);
++
++ssize_t vfs_lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
++ struct pipe_inode_info *pipe, size_t len,
++ unsigned int flags)
++{
++ struct lib_ring_buffer *buf = in->private_data;
++
++ return lib_ring_buffer_splice_read(in, ppos, pipe, len, flags, buf);
++}
++EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_splice_read);
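As with the mmap mode above, a hedged userspace sketch of the splice flow this file implements: take exclusive access to the next sub-buffer, splice its padded, page-aligned contents through a pipe, then release it. Error handling is omitted and the include stands in for an assumed userspace copy of the ioctl definitions:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "ringbuffer/vfs.h"     /* assumed userspace copy of the ioctl numbers */

    static void consume_splice(int stream_fd, int out_fd)
    {
            int pipefd[2];
            unsigned long padded;
            loff_t off;

            pipe(pipefd);
            while (!ioctl(stream_fd, RING_BUFFER_GET_NEXT_SUBBUF)) {
                    /* Padded size is page-aligned, as splice_read requires. */
                    ioctl(stream_fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE, &padded);
                    off = 0;
                    while (padded) {
                            ssize_t ret = splice(stream_fd, &off, pipefd[1], NULL,
                                                 padded, SPLICE_F_MOVE);
                            if (ret <= 0)
                                    break;
                            splice(pipefd[0], NULL, out_fd, NULL, ret, SPLICE_F_MOVE);
                            padded -= ret;
                    }
                    ioctl(stream_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
            }
    }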
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
+@@ -0,0 +1,450 @@
++/*
++ * ring_buffer_vfs.c
++ *
++ * Ring Buffer VFS file operations.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++
++#include "../../wrapper/ringbuffer/backend.h"
++#include "../../wrapper/ringbuffer/frontend.h"
++#include "../../wrapper/ringbuffer/vfs.h"
++#include "../../wrapper/poll.h"
++
++static int put_ulong(unsigned long val, unsigned long arg)
++{
++ return put_user(val, (unsigned long __user *)arg);
++}
++
++#ifdef CONFIG_COMPAT
++static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
++{
++ return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
++}
++#endif
++
++/*
++ * This is not used by anonymous file descriptors. This code is kept in
++ * case we ever want to implement an inode with an open() operation.
++ */
++int lib_ring_buffer_open(struct inode *inode, struct file *file,
++ struct lib_ring_buffer *buf)
++{
++ int ret;
++
++ if (!buf)
++ return -EINVAL;
++
++ ret = lib_ring_buffer_open_read(buf);
++ if (ret)
++ return ret;
++
++ ret = nonseekable_open(inode, file);
++ if (ret)
++ goto release_read;
++ return 0;
++
++release_read:
++ lib_ring_buffer_release_read(buf);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_open);
++
++/**
++ * vfs_lib_ring_buffer_open - ring buffer open file operation
++ * @inode: opened inode
++ * @file: opened file
++ *
++ * Open implementation. Ensures that at most one read instance of a buffer
++ * is open at any given time.
++ */
++static
++int vfs_lib_ring_buffer_open(struct inode *inode, struct file *file)
++{
++ struct lib_ring_buffer *buf = inode->i_private;
++
++ file->private_data = buf;
++ return lib_ring_buffer_open(inode, file, buf);
++}
++
++int lib_ring_buffer_release(struct inode *inode, struct file *file,
++ struct lib_ring_buffer *buf)
++{
++ lib_ring_buffer_release_read(buf);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_release);
++
++/**
++ * vfs_lib_ring_buffer_release - ring buffer release file operation
++ * @inode: opened inode
++ * @file: opened file
++ *
++ * Release implementation.
++ */
++static
++int vfs_lib_ring_buffer_release(struct inode *inode, struct file *file)
++{
++ struct lib_ring_buffer *buf = file->private_data;
++
++ return lib_ring_buffer_release(inode, file, buf);
++}
++
++unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
++ struct lib_ring_buffer *buf)
++{
++ unsigned int mask = 0;
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++ int finalized, disabled;
++
++ if (filp->f_mode & FMODE_READ) {
++ poll_wait_set_exclusive(wait);
++ poll_wait(filp, &buf->read_wait, wait);
++
++ finalized = lib_ring_buffer_is_finalized(config, buf);
++ disabled = lib_ring_buffer_channel_is_disabled(chan);
++
++ /*
++ * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering the
++ * finalized load before the offset loads.
++ */
++ WARN_ON(atomic_long_read(&buf->active_readers) != 1);
++retry:
++ if (disabled)
++ return POLLERR;
++
++ if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
++ - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
++ == 0) {
++ if (finalized)
++ return POLLHUP;
++ else {
++ /*
++ * The memory barriers in
++ * __wait_event()/wake_up_interruptible() take
++ * care of "raw_spin_is_locked" memory ordering.
++ */
++ if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
++ goto retry;
++ else
++ return 0;
++ }
++ } else {
++ if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
++ chan)
++ - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
++ chan)
++ >= chan->backend.buf_size)
++ return POLLPRI | POLLRDBAND;
++ else
++ return POLLIN | POLLRDNORM;
++ }
++ }
++ return mask;
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_poll);
++
++/**
++ * vfs_lib_ring_buffer_poll - ring buffer poll file operation
++ * @filp: the file
++ * @wait: poll table
++ *
++ * Poll implementation.
++ */
++static
++unsigned int vfs_lib_ring_buffer_poll(struct file *filp, poll_table *wait)
++{
++ struct lib_ring_buffer *buf = filp->private_data;
++
++ return lib_ring_buffer_poll(filp, wait, buf);
++}
++
++long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg, struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ if (lib_ring_buffer_channel_is_disabled(chan))
++ return -EIO;
++
++ switch (cmd) {
++ case RING_BUFFER_SNAPSHOT:
++ return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
++ &buf->prod_snapshot);
++ case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
++ return put_ulong(buf->cons_snapshot, arg);
++ case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
++ return put_ulong(buf->prod_snapshot, arg);
++ case RING_BUFFER_GET_SUBBUF:
++ {
++ unsigned long uconsume;
++ long ret;
++
++ ret = get_user(uconsume, (unsigned long __user *) arg);
++ if (ret)
++ return ret; /* will return -EFAULT */
++ ret = lib_ring_buffer_get_subbuf(buf, uconsume);
++ if (!ret) {
++ /* Set file position to zero at each successful "get" */
++ filp->f_pos = 0;
++ }
++ return ret;
++ }
++ case RING_BUFFER_PUT_SUBBUF:
++ lib_ring_buffer_put_subbuf(buf);
++ return 0;
++
++ case RING_BUFFER_GET_NEXT_SUBBUF:
++ {
++ long ret;
++
++ ret = lib_ring_buffer_get_next_subbuf(buf);
++ if (!ret) {
++ /* Set file position to zero at each successful "get" */
++ filp->f_pos = 0;
++ }
++ return ret;
++ }
++ case RING_BUFFER_PUT_NEXT_SUBBUF:
++ lib_ring_buffer_put_next_subbuf(buf);
++ return 0;
++ case RING_BUFFER_GET_SUBBUF_SIZE:
++ return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
++ arg);
++ case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
++ {
++ unsigned long size;
++
++ size = lib_ring_buffer_get_read_data_size(config, buf);
++ size = PAGE_ALIGN(size);
++ return put_ulong(size, arg);
++ }
++ case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
++ return put_ulong(chan->backend.subbuf_size, arg);
++ case RING_BUFFER_GET_MMAP_LEN:
++ {
++ unsigned long mmap_buf_len;
++
++ if (config->output != RING_BUFFER_MMAP)
++ return -EINVAL;
++ mmap_buf_len = chan->backend.buf_size;
++ if (chan->backend.extra_reader_sb)
++ mmap_buf_len += chan->backend.subbuf_size;
++ if (mmap_buf_len > INT_MAX)
++ return -EFBIG;
++ return put_ulong(mmap_buf_len, arg);
++ }
++ case RING_BUFFER_GET_MMAP_READ_OFFSET:
++ {
++ unsigned long sb_bindex;
++
++ if (config->output != RING_BUFFER_MMAP)
++ return -EINVAL;
++ sb_bindex = subbuffer_id_get_index(config,
++ buf->backend.buf_rsb.id);
++ return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
++ arg);
++ }
++ case RING_BUFFER_FLUSH:
++ lib_ring_buffer_switch_remote(buf);
++ return 0;
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_ioctl);
++
++/**
++ * vfs_lib_ring_buffer_ioctl - control ring buffer reader synchronization
++ *
++ * @filp: the file
++ * @cmd: the command
++ * @arg: command arg
++ *
++ * This ioctl implements commands necessary for producer/consumer
++ * and flight recorder reader interaction:
++ * RING_BUFFER_GET_NEXT_SUBBUF
++ * Get the next sub-buffer that can be read. It never blocks.
++ * RING_BUFFER_PUT_NEXT_SUBBUF
++ * Release the currently read sub-buffer.
++ * RING_BUFFER_GET_SUBBUF_SIZE
++ * returns the size of the current sub-buffer.
++ * RING_BUFFER_GET_MAX_SUBBUF_SIZE
++ * returns the maximum size for sub-buffers.
++ * RING_BUFFER_GET_NUM_SUBBUF
++ * returns the number of reader-visible sub-buffers in the per cpu
++ * channel (for mmap).
++ * RING_BUFFER_GET_MMAP_READ_OFFSET
++ * returns the offset of the subbuffer belonging to the reader.
++ * Should only be used for mmap clients.
++ */
++static
++long vfs_lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++ struct lib_ring_buffer *buf = filp->private_data;
++
++ return lib_ring_buffer_ioctl(filp, cmd, arg, buf);
++}
++
++#ifdef CONFIG_COMPAT
++long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg, struct lib_ring_buffer *buf)
++{
++ struct channel *chan = buf->backend.chan;
++ const struct lib_ring_buffer_config *config = &chan->backend.config;
++
++ if (lib_ring_buffer_channel_is_disabled(chan))
++ return -EIO;
++
++ switch (cmd) {
++ case RING_BUFFER_COMPAT_SNAPSHOT:
++ return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
++ &buf->prod_snapshot);
++ case RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED:
++ return compat_put_ulong(buf->cons_snapshot, arg);
++ case RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED:
++ return compat_put_ulong(buf->prod_snapshot, arg);
++ case RING_BUFFER_COMPAT_GET_SUBBUF:
++ {
++ __u32 uconsume;
++ unsigned long consume;
++ long ret;
++
++ ret = get_user(uconsume, (__u32 __user *) arg);
++ if (ret)
++ return ret; /* will return -EFAULT */
++ consume = buf->cons_snapshot;
++ consume &= ~0xFFFFFFFFL;
++ consume |= uconsume;
++ ret = lib_ring_buffer_get_subbuf(buf, consume);
++ if (!ret) {
++ /* Set file position to zero at each successful "get" */
++ filp->f_pos = 0;
++ }
++ return ret;
++ }
++ case RING_BUFFER_COMPAT_PUT_SUBBUF:
++ lib_ring_buffer_put_subbuf(buf);
++ return 0;
++
++ case RING_BUFFER_COMPAT_GET_NEXT_SUBBUF:
++ {
++ long ret;
++
++ ret = lib_ring_buffer_get_next_subbuf(buf);
++ if (!ret) {
++ /* Set file position to zero at each successful "get" */
++ filp->f_pos = 0;
++ }
++ return ret;
++ }
++ case RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF:
++ lib_ring_buffer_put_next_subbuf(buf);
++ return 0;
++ case RING_BUFFER_COMPAT_GET_SUBBUF_SIZE:
++ {
++ unsigned long data_size;
++
++ data_size = lib_ring_buffer_get_read_data_size(config, buf);
++ if (data_size > UINT_MAX)
++ return -EFBIG;
++ return compat_put_ulong(data_size, arg);
++ }
++ case RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE:
++ {
++ unsigned long size;
++
++ size = lib_ring_buffer_get_read_data_size(config, buf);
++ size = PAGE_ALIGN(size);
++ if (size > UINT_MAX)
++ return -EFBIG;
++ return compat_put_ulong(size, arg);
++ }
++ case RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE:
++ if (chan->backend.subbuf_size > UINT_MAX)
++ return -EFBIG;
++ return compat_put_ulong(chan->backend.subbuf_size, arg);
++ case RING_BUFFER_COMPAT_GET_MMAP_LEN:
++ {
++ unsigned long mmap_buf_len;
++
++ if (config->output != RING_BUFFER_MMAP)
++ return -EINVAL;
++ mmap_buf_len = chan->backend.buf_size;
++ if (chan->backend.extra_reader_sb)
++ mmap_buf_len += chan->backend.subbuf_size;
++ if (mmap_buf_len > UINT_MAX)
++ return -EFBIG;
++ return compat_put_ulong(mmap_buf_len, arg);
++ }
++ case RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET:
++ {
++ unsigned long sb_bindex, read_offset;
++
++ if (config->output != RING_BUFFER_MMAP)
++ return -EINVAL;
++ sb_bindex = subbuffer_id_get_index(config,
++ buf->backend.buf_rsb.id);
++ read_offset = buf->backend.array[sb_bindex]->mmap_offset;
++ if (read_offset > UINT_MAX)
++ return -EINVAL;
++ return compat_put_ulong(read_offset, arg);
++ }
++ case RING_BUFFER_COMPAT_FLUSH:
++ lib_ring_buffer_switch_remote(buf);
++ return 0;
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++EXPORT_SYMBOL_GPL(lib_ring_buffer_compat_ioctl);
++
++static
++long vfs_lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg)
++{
++ struct lib_ring_buffer *buf = filp->private_data;
++
++ return lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
++}
++#endif
++
++const struct file_operations lib_ring_buffer_file_operations = {
++ .owner = THIS_MODULE,
++ .open = vfs_lib_ring_buffer_open,
++ .release = vfs_lib_ring_buffer_release,
++ .poll = vfs_lib_ring_buffer_poll,
++ .splice_read = vfs_lib_ring_buffer_splice_read,
++ .mmap = vfs_lib_ring_buffer_mmap,
++ .unlocked_ioctl = vfs_lib_ring_buffer_ioctl,
++ .llseek = vfs_lib_ring_buffer_no_llseek,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = vfs_lib_ring_buffer_compat_ioctl,
++#endif
++};
++EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Ring Buffer Library VFS");
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/vatomic.h
+@@ -0,0 +1,97 @@
++#ifndef _LIB_RING_BUFFER_VATOMIC_H
++#define _LIB_RING_BUFFER_VATOMIC_H
++
++/*
++ * lib/ringbuffer/vatomic.h
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <asm/atomic.h>
++#include <asm/local.h>
++
++/*
++ * Same data type (long) accessed differently depending on configuration.
++ * v field is for non-atomic access (protected by mutual exclusion).
++ * In the fast-path, the ring_buffer_config structure is constant, so the
++ * compiler can statically select the appropriate branch.
++ * local_t is used for per-cpu and per-thread buffers.
++ * atomic_long_t is used for globally shared buffers.
++ */
++union v_atomic {
++ local_t l;
++ atomic_long_t a;
++ long v;
++};
++
++static inline
++long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
++{
++ if (config->sync == RING_BUFFER_SYNC_PER_CPU)
++ return local_read(&v_a->l);
++ else
++ return atomic_long_read(&v_a->a);
++}
++
++static inline
++void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
++ long v)
++{
++ if (config->sync == RING_BUFFER_SYNC_PER_CPU)
++ local_set(&v_a->l, v);
++ else
++ atomic_long_set(&v_a->a, v);
++}
++
++static inline
++void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
++{
++ if (config->sync == RING_BUFFER_SYNC_PER_CPU)
++ local_add(v, &v_a->l);
++ else
++ atomic_long_add(v, &v_a->a);
++}
++
++static inline
++void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
++{
++ if (config->sync == RING_BUFFER_SYNC_PER_CPU)
++ local_inc(&v_a->l);
++ else
++ atomic_long_inc(&v_a->a);
++}
++
++/*
++ * Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
++ */
++static inline
++void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
++{
++ --v_a->v;
++}
++
++static inline
++long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
++ long old, long _new)
++{
++ if (config->sync == RING_BUFFER_SYNC_PER_CPU)
++ return local_cmpxchg(&v_a->l, old, _new);
++ else
++ return atomic_long_cmpxchg(&v_a->a, old, _new);
++}
++
++#endif /* _LIB_RING_BUFFER_VATOMIC_H */
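To illustrate the compile-time branch selection described in the comment above, a minimal hedged sketch; only the .sync member of the much larger config structure is shown, and the counter and function names are made up for the example:

    /* Hypothetical in-kernel user of the v_atomic helpers above. */
    static const struct lib_ring_buffer_config demo_config = {
            .sync = RING_BUFFER_SYNC_PER_CPU,       /* other fields omitted */
    };

    static union v_atomic records_counted;

    static void demo_account_record(void)
    {
            /* demo_config is constant, so the compiler statically selects
             * the local_inc() branch of v_inc(). */
            v_inc(&demo_config, &records_counted);
    }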
+--- /dev/null
++++ b/drivers/staging/lttng/lib/ringbuffer/vfs.h
+@@ -0,0 +1,150 @@
++#ifndef _LIB_RING_BUFFER_VFS_H
++#define _LIB_RING_BUFFER_VFS_H
++
++/*
++ * lib/ringbuffer/vfs.h
++ *
++ * Wait-free ring buffer VFS file operations.
++ *
++ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Author:
++ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ */
++
++#include <linux/fs.h>
++#include <linux/poll.h>
++
++/* VFS API */
++
++extern const struct file_operations lib_ring_buffer_file_operations;
++
++/*
++ * Internal file operations.
++ */
++
++struct lib_ring_buffer;
++
++int lib_ring_buffer_open(struct inode *inode, struct file *file,
++ struct lib_ring_buffer *buf);
++int lib_ring_buffer_release(struct inode *inode, struct file *file,
++ struct lib_ring_buffer *buf);
++unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
++ struct lib_ring_buffer *buf);
++ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
++ struct pipe_inode_info *pipe, size_t len,
++ unsigned int flags, struct lib_ring_buffer *buf);
++int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
++ struct lib_ring_buffer *buf);
++
++/* Ring Buffer ioctl() and ioctl numbers */
++long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg, struct lib_ring_buffer *buf);
++#ifdef CONFIG_COMPAT
++long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
++ unsigned long arg, struct lib_ring_buffer *buf);
++#endif
++
++ssize_t vfs_lib_ring_buffer_file_splice_read(struct file *in, loff_t *ppos,
++ struct pipe_inode_info *pipe, size_t len, unsigned int flags);
++loff_t vfs_lib_ring_buffer_no_llseek(struct file *file, loff_t offset,
++ int origin);
++int vfs_lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma);
++ssize_t vfs_lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
++ struct pipe_inode_info *pipe, size_t len,
++ unsigned int flags);
++
++/*
++ * Use RING_BUFFER_GET_NEXT_SUBBUF / RING_BUFFER_PUT_NEXT_SUBBUF to read and
++ * consume sub-buffers sequentially.
++ *
++ * Reading sub-buffers without consuming them can be performed with:
++ *
++ * RING_BUFFER_SNAPSHOT
++ * RING_BUFFER_SNAPSHOT_GET_CONSUMED
++ * RING_BUFFER_SNAPSHOT_GET_PRODUCED
++ *
++ * to get the offset range to consume, and then by passing each sub-buffer
++ * offset to RING_BUFFER_GET_SUBBUF, read the sub-buffer, and then release it
++ * with RING_BUFFER_PUT_SUBBUF.
++ *
++ * Note that the "snapshot" API can be used to read the sub-buffer in reverse
++ * order, which is useful for flight recorder snapshots.
++ */
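A hedged userspace sketch of that non-consuming snapshot flow, using the ioctl commands defined just below. Stepping through the snapshot range by the maximum sub-buffer size is an assumption of this sketch, as is the userspace availability of the ioctl numbers:

    #include <sys/ioctl.h>

    static void snapshot_stream(int stream_fd, unsigned long max_subbuf_size)
    {
            unsigned long consumed, produced, pos;

            ioctl(stream_fd, RING_BUFFER_SNAPSHOT);
            ioctl(stream_fd, RING_BUFFER_SNAPSHOT_GET_CONSUMED, &consumed);
            ioctl(stream_fd, RING_BUFFER_SNAPSHOT_GET_PRODUCED, &produced);

            /* Walk the snapshot range without moving the consumer position. */
            for (pos = consumed; pos < produced; pos += max_subbuf_size) {
                    if (ioctl(stream_fd, RING_BUFFER_GET_SUBBUF, &pos))
                            continue;
                    /* ... read the sub-buffer via read(), mmap() or splice() ... */
                    ioctl(stream_fd, RING_BUFFER_PUT_SUBBUF);
            }
    }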
++
++/* Get a snapshot of the current ring buffer producer and consumer positions */
++#define RING_BUFFER_SNAPSHOT _IO(0xF6, 0x00)
++/* Get the consumer position (iteration start) */
++#define RING_BUFFER_SNAPSHOT_GET_CONSUMED _IOR(0xF6, 0x01, unsigned long)
++/* Get the producer position (iteration end) */
++#define RING_BUFFER_SNAPSHOT_GET_PRODUCED _IOR(0xF6, 0x02, unsigned long)
++/* Get exclusive read access to the specified sub-buffer position */
++#define RING_BUFFER_GET_SUBBUF _IOW(0xF6, 0x03, unsigned long)
++/* Release exclusive sub-buffer access */
++#define RING_BUFFER_PUT_SUBBUF _IO(0xF6, 0x04)
++
++/* Get exclusive read access to the next sub-buffer that can be read. */
++#define RING_BUFFER_GET_NEXT_SUBBUF _IO(0xF6, 0x05)
++/* Release exclusive sub-buffer access, move consumer forward. */
++#define RING_BUFFER_PUT_NEXT_SUBBUF _IO(0xF6, 0x06)
++/* returns the size of the current sub-buffer, without padding (for mmap). */
++#define RING_BUFFER_GET_SUBBUF_SIZE _IOR(0xF6, 0x07, unsigned long)
++/* returns the size of the current sub-buffer, with padding (for splice). */
++#define RING_BUFFER_GET_PADDED_SUBBUF_SIZE _IOR(0xF6, 0x08, unsigned long)
++/* returns the maximum size for sub-buffers. */
++#define RING_BUFFER_GET_MAX_SUBBUF_SIZE _IOR(0xF6, 0x09, unsigned long)
++/* returns the length to mmap. */
++#define RING_BUFFER_GET_MMAP_LEN _IOR(0xF6, 0x0A, unsigned long)
++/* returns the offset of the subbuffer belonging to the mmap reader. */
++#define RING_BUFFER_GET_MMAP_READ_OFFSET _IOR(0xF6, 0x0B, unsigned long)
++/* flush the current sub-buffer */
++#define RING_BUFFER_FLUSH _IO(0xF6, 0x0C)
++
++#ifdef CONFIG_COMPAT
++/* Get a snapshot of the current ring buffer producer and consumer positions */
++#define RING_BUFFER_COMPAT_SNAPSHOT RING_BUFFER_SNAPSHOT
++/* Get the consumer position (iteration start) */
++#define RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED \
++ _IOR(0xF6, 0x01, compat_ulong_t)
++/* Get the producer position (iteration end) */
++#define RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED \
++ _IOR(0xF6, 0x02, compat_ulong_t)
++/* Get exclusive read access to the specified sub-buffer position */
++#define RING_BUFFER_COMPAT_GET_SUBBUF _IOW(0xF6, 0x03, compat_ulong_t)
++/* Release exclusive sub-buffer access */
++#define RING_BUFFER_COMPAT_PUT_SUBBUF RING_BUFFER_PUT_SUBBUF
++
++/* Get exclusive read access to the next sub-buffer that can be read. */
++#define RING_BUFFER_COMPAT_GET_NEXT_SUBBUF RING_BUFFER_GET_NEXT_SUBBUF
++/* Release exclusive sub-buffer access, move consumer forward. */
++#define RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF RING_BUFFER_PUT_NEXT_SUBBUF
++/* returns the size of the current sub-buffer, without padding (for mmap). */
++#define RING_BUFFER_COMPAT_GET_SUBBUF_SIZE _IOR(0xF6, 0x07, compat_ulong_t)
++/* returns the size of the current sub-buffer, with padding (for splice). */
++#define RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE \
++ _IOR(0xF6, 0x08, compat_ulong_t)
++/* returns the maximum size for sub-buffers. */
++#define RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE _IOR(0xF6, 0x09, compat_ulong_t)
++/* returns the length to mmap. */
++#define RING_BUFFER_COMPAT_GET_MMAP_LEN _IOR(0xF6, 0x0A, compat_ulong_t)
++/* returns the offset of the subbuffer belonging to the mmap reader. */
++#define RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET _IOR(0xF6, 0x0B, compat_ulong_t)
++/* flush the current sub-buffer */
++#define RING_BUFFER_COMPAT_FLUSH RING_BUFFER_FLUSH
++#endif /* CONFIG_COMPAT */
++
++#endif /* _LIB_RING_BUFFER_VFS_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-abi-old.h
+@@ -0,0 +1,141 @@
++#ifndef _LTTNG_ABI_OLD_H
++#define _LTTNG_ABI_OLD_H
++
++/*
++ * lttng-abi-old.h
++ *
++ * LTTng old ABI header (without support for compat 32/64 bits)
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/fs.h>
++#include "lttng-abi.h"
++
++/*
++ * LTTng DebugFS ABI structures.
++ */
++#define LTTNG_KERNEL_OLD_CHANNEL_PADDING LTTNG_KERNEL_SYM_NAME_LEN + 32
++struct lttng_kernel_old_channel {
++ int overwrite; /* 1: overwrite, 0: discard */
++ uint64_t subbuf_size; /* in bytes */
++ uint64_t num_subbuf;
++ unsigned int switch_timer_interval; /* usecs */
++ unsigned int read_timer_interval; /* usecs */
++ enum lttng_kernel_output output; /* splice, mmap */
++ char padding[LTTNG_KERNEL_OLD_CHANNEL_PADDING];
++};
++
++struct lttng_kernel_old_kretprobe {
++ uint64_t addr;
++
++ uint64_t offset;
++ char symbol_name[LTTNG_KERNEL_SYM_NAME_LEN];
++};
++
++/*
++ * Either addr is used, or symbol_name and offset.
++ */
++struct lttng_kernel_old_kprobe {
++ uint64_t addr;
++
++ uint64_t offset;
++ char symbol_name[LTTNG_KERNEL_SYM_NAME_LEN];
++};
++
++struct lttng_kernel_old_function_tracer {
++ char symbol_name[LTTNG_KERNEL_SYM_NAME_LEN];
++};
++
++/*
++ * For syscall tracing, name = '\0' means "enable all".
++ */
++#define LTTNG_KERNEL_OLD_EVENT_PADDING1 16
++#define LTTNG_KERNEL_OLD_EVENT_PADDING2 LTTNG_KERNEL_SYM_NAME_LEN + 32
++struct lttng_kernel_old_event {
++ char name[LTTNG_KERNEL_SYM_NAME_LEN]; /* event name */
++ enum lttng_kernel_instrumentation instrumentation;
++ char padding[LTTNG_KERNEL_OLD_EVENT_PADDING1];
++
++ /* Per instrumentation type configuration */
++ union {
++ struct lttng_kernel_old_kretprobe kretprobe;
++ struct lttng_kernel_old_kprobe kprobe;
++ struct lttng_kernel_old_function_tracer ftrace;
++ char padding[LTTNG_KERNEL_OLD_EVENT_PADDING2];
++ } u;
++};
++
++struct lttng_kernel_old_tracer_version {
++ uint32_t major;
++ uint32_t minor;
++ uint32_t patchlevel;
++};
++
++struct lttng_kernel_old_calibrate {
++ enum lttng_kernel_calibrate_type type; /* type (input) */
++};
++
++struct lttng_kernel_old_perf_counter_ctx {
++ uint32_t type;
++ uint64_t config;
++ char name[LTTNG_KERNEL_SYM_NAME_LEN];
++};
++
++#define LTTNG_KERNEL_OLD_CONTEXT_PADDING1 16
++#define LTTNG_KERNEL_OLD_CONTEXT_PADDING2 LTTNG_KERNEL_SYM_NAME_LEN + 32
++struct lttng_kernel_old_context {
++ enum lttng_kernel_context_type ctx;
++ char padding[LTTNG_KERNEL_OLD_CONTEXT_PADDING1];
++
++ union {
++ struct lttng_kernel_old_perf_counter_ctx perf_counter;
++ char padding[LTTNG_KERNEL_OLD_CONTEXT_PADDING2];
++ } u;
++};
++
++/* LTTng file descriptor ioctl */
++#define LTTNG_KERNEL_OLD_SESSION _IO(0xF6, 0x40)
++#define LTTNG_KERNEL_OLD_TRACER_VERSION \
++ _IOR(0xF6, 0x41, struct lttng_kernel_old_tracer_version)
++#define LTTNG_KERNEL_OLD_TRACEPOINT_LIST _IO(0xF6, 0x42)
++#define LTTNG_KERNEL_OLD_WAIT_QUIESCENT _IO(0xF6, 0x43)
++#define LTTNG_KERNEL_OLD_CALIBRATE \
++ _IOWR(0xF6, 0x44, struct lttng_kernel_old_calibrate)
++
++/* Session FD ioctl */
++#define LTTNG_KERNEL_OLD_METADATA \
++ _IOW(0xF6, 0x50, struct lttng_kernel_old_channel)
++#define LTTNG_KERNEL_OLD_CHANNEL \
++ _IOW(0xF6, 0x51, struct lttng_kernel_old_channel)
++#define LTTNG_KERNEL_OLD_SESSION_START _IO(0xF6, 0x52)
++#define LTTNG_KERNEL_OLD_SESSION_STOP _IO(0xF6, 0x53)
++
++/* Channel FD ioctl */
++#define LTTNG_KERNEL_OLD_STREAM _IO(0xF6, 0x60)
++#define LTTNG_KERNEL_OLD_EVENT \
++ _IOW(0xF6, 0x61, struct lttng_kernel_old_event)
++
++/* Event and Channel FD ioctl */
++#define LTTNG_KERNEL_OLD_CONTEXT \
++ _IOW(0xF6, 0x70, struct lttng_kernel_old_context)
++
++/* Event, Channel and Session ioctl */
++#define LTTNG_KERNEL_OLD_ENABLE _IO(0xF6, 0x80)
++#define LTTNG_KERNEL_OLD_DISABLE _IO(0xF6, 0x81)
++
++#endif /* _LTTNG_ABI_OLD_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-abi.c
+@@ -0,0 +1,1346 @@
++/*
++ * lttng-abi.c
++ *
++ * LTTng ABI
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ *
++ * Mimic system calls for:
++ * - session creation, returns a file descriptor or failure.
++ * - channel creation, returns a file descriptor or failure.
++ * - Operates on a session file descriptor
++ * - Takes all channel options as parameters.
++ * - stream get, returns a file descriptor or failure.
++ * - Operates on a channel file descriptor.
++ * - stream notifier get, returns a file descriptor or failure.
++ * - Operates on a channel file descriptor.
++ * - event creation, returns a file descriptor or failure.
++ * - Operates on a channel file descriptor
++ * - Takes an event name as parameter
++ * - Takes an instrumentation source as parameter
++ * - e.g. tracepoints, dynamic_probes...
++ * - Takes instrumentation source specific arguments.
++ */
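A hedged userspace sketch of that flow, using the "old" ABI structures and ioctl numbers from lttng-abi-old.h above; the include stands in for an assumed userspace copy of those definitions, and the channel parameter values are purely illustrative:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "lttng-abi-old.h"      /* assumed userspace copy of the old ABI */

    static int start_tracing(void)
    {
            struct lttng_kernel_old_channel chan_param;
            int lttng_fd, session_fd, channel_fd, stream_fd;

            lttng_fd = open("/proc/lttng", O_RDWR);
            session_fd = ioctl(lttng_fd, LTTNG_KERNEL_OLD_SESSION);

            memset(&chan_param, 0, sizeof(chan_param));
            chan_param.overwrite = 0;               /* discard mode */
            chan_param.subbuf_size = 262144;        /* illustrative sizing */
            chan_param.num_subbuf = 4;
            chan_param.output = LTTNG_KERNEL_SPLICE;
            channel_fd = ioctl(session_fd, LTTNG_KERNEL_OLD_CHANNEL, &chan_param);

            /* Stream file descriptor for one of the channel's ring buffers. */
            stream_fd = ioctl(channel_fd, LTTNG_KERNEL_OLD_STREAM);

            return ioctl(session_fd, LTTNG_KERNEL_OLD_SESSION_START);
    }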
++
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <linux/anon_inodes.h>
++#include <linux/file.h>
++#include <linux/uaccess.h>
++#include <linux/slab.h>
++#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
++#include "wrapper/ringbuffer/vfs.h"
++#include "wrapper/ringbuffer/backend.h"
++#include "wrapper/ringbuffer/frontend.h"
++#include "wrapper/poll.h"
++#include "lttng-abi.h"
++#include "lttng-abi-old.h"
++#include "lttng-events.h"
++#include "lttng-tracer.h"
++
++/*
++ * This is LTTng's way of providing system-call-like entry points from an
++ * external module: ioctl() on /proc/lttng.
++ */
++
++static struct proc_dir_entry *lttng_proc_dentry;
++static const struct file_operations lttng_fops;
++static const struct file_operations lttng_session_fops;
++static const struct file_operations lttng_channel_fops;
++static const struct file_operations lttng_metadata_fops;
++static const struct file_operations lttng_event_fops;
++
++/*
++ * Teardown management: opened file descriptors keep a refcount on the module,
++ * so the module can only be unloaded once all file descriptors are closed.
++ */
++
++static
++int lttng_abi_create_session(void)
++{
++ struct lttng_session *session;
++ struct file *session_file;
++ int session_fd, ret;
++
++ session = lttng_session_create();
++ if (!session)
++ return -ENOMEM;
++ session_fd = get_unused_fd();
++ if (session_fd < 0) {
++ ret = session_fd;
++ goto fd_error;
++ }
++ session_file = anon_inode_getfile("[lttng_session]",
++ &lttng_session_fops,
++ session, O_RDWR);
++ if (IS_ERR(session_file)) {
++ ret = PTR_ERR(session_file);
++ goto file_error;
++ }
++ session->file = session_file;
++ fd_install(session_fd, session_file);
++ return session_fd;
++
++file_error:
++ put_unused_fd(session_fd);
++fd_error:
++ lttng_session_destroy(session);
++ return ret;
++}
++
++static
++int lttng_abi_tracepoint_list(void)
++{
++ struct file *tracepoint_list_file;
++ int file_fd, ret;
++
++ file_fd = get_unused_fd();
++ if (file_fd < 0) {
++ ret = file_fd;
++ goto fd_error;
++ }
++
++ tracepoint_list_file = anon_inode_getfile("[lttng_session]",
++ &lttng_tracepoint_list_fops,
++ NULL, O_RDWR);
++ if (IS_ERR(tracepoint_list_file)) {
++ ret = PTR_ERR(tracepoint_list_file);
++ goto file_error;
++ }
++ ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
++ if (ret < 0)
++ goto open_error;
++ fd_install(file_fd, tracepoint_list_file);
++ if (file_fd < 0) {
++ ret = file_fd;
++ goto fd_error;
++ }
++ return file_fd;
++
++open_error:
++ fput(tracepoint_list_file);
++file_error:
++ put_unused_fd(file_fd);
++fd_error:
++ return ret;
++}
++
++static
++void lttng_abi_tracer_version(struct lttng_kernel_tracer_version *v)
++{
++ v->major = LTTNG_MODULES_MAJOR_VERSION;
++ v->minor = LTTNG_MODULES_MINOR_VERSION;
++ v->patchlevel = LTTNG_MODULES_PATCHLEVEL_VERSION;
++}
++
++static
++long lttng_abi_add_context(struct file *file,
++ struct lttng_kernel_context *context_param,
++ struct lttng_ctx **ctx, struct lttng_session *session)
++{
++
++ if (session->been_active)
++ return -EPERM;
++
++ switch (context_param->ctx) {
++ case LTTNG_KERNEL_CONTEXT_PID:
++ return lttng_add_pid_to_ctx(ctx);
++ case LTTNG_KERNEL_CONTEXT_PRIO:
++ return lttng_add_prio_to_ctx(ctx);
++ case LTTNG_KERNEL_CONTEXT_NICE:
++ return lttng_add_nice_to_ctx(ctx);
++ case LTTNG_KERNEL_CONTEXT_VPID:
++ return lttng_add_vpid_to_ctx(ctx);
++ case LTTNG_KERNEL_CONTEXT_TID:
++ return lttng_add_tid_to_ctx(ctx);
++ case LTTNG_KERNEL_CONTEXT_VTID:
++ return lttng_add_vtid_to_ctx(ctx);
++ case LTTNG_KERNEL_CONTEXT_PPID:
++ return lttng_add_ppid_to_ctx(ctx);
++ case LTTNG_KERNEL_CONTEXT_VPPID:
++ return lttng_add_vppid_to_ctx(ctx);
++ case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
++ context_param->u.perf_counter.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
++ return lttng_add_perf_counter_to_ctx(context_param->u.perf_counter.type,
++ context_param->u.perf_counter.config,
++ context_param->u.perf_counter.name,
++ ctx);
++ case LTTNG_KERNEL_CONTEXT_PROCNAME:
++ return lttng_add_procname_to_ctx(ctx);
++ case LTTNG_KERNEL_CONTEXT_HOSTNAME:
++ return lttng_add_hostname_to_ctx(ctx);
++ default:
++ return -EINVAL;
++ }
++}
++
++/**
++ * lttng_ioctl - lttng syscall through ioctl
++ *
++ * @file: the file
++ * @cmd: the command
++ * @arg: command arg
++ *
++ * This ioctl implements lttng commands:
++ * LTTNG_KERNEL_SESSION
++ * Returns a LTTng trace session file descriptor
++ * LTTNG_KERNEL_TRACER_VERSION
++ * Returns the LTTng kernel tracer version
++ * LTTNG_KERNEL_TRACEPOINT_LIST
++ * Returns a file descriptor listing available tracepoints
++ * LTTNG_KERNEL_WAIT_QUIESCENT
++ * Returns after all previously running probes have completed
++ *
++ * The returned session will be deleted when its file descriptor is closed.
++ */
++static
++long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ switch (cmd) {
++ case LTTNG_KERNEL_OLD_SESSION:
++ case LTTNG_KERNEL_SESSION:
++ return lttng_abi_create_session();
++ case LTTNG_KERNEL_OLD_TRACER_VERSION:
++ {
++ struct lttng_kernel_tracer_version v;
++ struct lttng_kernel_old_tracer_version oldv;
++ struct lttng_kernel_old_tracer_version *uversion =
++ (struct lttng_kernel_old_tracer_version __user *) arg;
++
++ lttng_abi_tracer_version(&v);
++ oldv.major = v.major;
++ oldv.minor = v.minor;
++ oldv.patchlevel = v.patchlevel;
++
++ if (copy_to_user(uversion, &oldv, sizeof(oldv)))
++ return -EFAULT;
++ return 0;
++ }
++ case LTTNG_KERNEL_TRACER_VERSION:
++ {
++ struct lttng_kernel_tracer_version version;
++ struct lttng_kernel_tracer_version *uversion =
++ (struct lttng_kernel_tracer_version __user *) arg;
++
++ lttng_abi_tracer_version(&version);
++
++ if (copy_to_user(uversion, &version, sizeof(version)))
++ return -EFAULT;
++ return 0;
++ }
++ case LTTNG_KERNEL_OLD_TRACEPOINT_LIST:
++ case LTTNG_KERNEL_TRACEPOINT_LIST:
++ return lttng_abi_tracepoint_list();
++ case LTTNG_KERNEL_OLD_WAIT_QUIESCENT:
++ case LTTNG_KERNEL_WAIT_QUIESCENT:
++ synchronize_trace();
++ return 0;
++ case LTTNG_KERNEL_OLD_CALIBRATE:
++ {
++ struct lttng_kernel_old_calibrate __user *ucalibrate =
++ (struct lttng_kernel_old_calibrate __user *) arg;
++ struct lttng_kernel_old_calibrate old_calibrate;
++ struct lttng_kernel_calibrate calibrate;
++ int ret;
++
++ if (copy_from_user(&old_calibrate, ucalibrate, sizeof(old_calibrate)))
++ return -EFAULT;
++ calibrate.type = old_calibrate.type;
++ ret = lttng_calibrate(&calibrate);
++ if (copy_to_user(ucalibrate, &old_calibrate, sizeof(old_calibrate)))
++ return -EFAULT;
++ return ret;
++ }
++ case LTTNG_KERNEL_CALIBRATE:
++ {
++ struct lttng_kernel_calibrate __user *ucalibrate =
++ (struct lttng_kernel_calibrate __user *) arg;
++ struct lttng_kernel_calibrate calibrate;
++ int ret;
++
++ if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
++ return -EFAULT;
++ ret = lttng_calibrate(&calibrate);
++ if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
++ return -EFAULT;
++ return ret;
++ }
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++static const struct file_operations lttng_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = lttng_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = lttng_ioctl,
++#endif
++};
++
++static
++int lttng_abi_create_channel(struct file *session_file,
++ struct lttng_kernel_channel *chan_param,
++ enum channel_type channel_type)
++{
++ struct lttng_session *session = session_file->private_data;
++ const struct file_operations *fops = NULL;
++ const char *transport_name;
++ struct lttng_channel *chan;
++ struct file *chan_file;
++ int chan_fd;
++ int ret = 0;
++
++ chan_fd = get_unused_fd();
++ if (chan_fd < 0) {
++ ret = chan_fd;
++ goto fd_error;
++ }
++ switch (channel_type) {
++ case PER_CPU_CHANNEL:
++ fops = &lttng_channel_fops;
++ break;
++ case METADATA_CHANNEL:
++ fops = &lttng_metadata_fops;
++ break;
++ }
++
++ chan_file = anon_inode_getfile("[lttng_channel]",
++ fops,
++ NULL, O_RDWR);
++ if (IS_ERR(chan_file)) {
++ ret = PTR_ERR(chan_file);
++ goto file_error;
++ }
++ switch (channel_type) {
++ case PER_CPU_CHANNEL:
++ if (chan_param->output == LTTNG_KERNEL_SPLICE) {
++ transport_name = chan_param->overwrite ?
++ "relay-overwrite" : "relay-discard";
++ } else if (chan_param->output == LTTNG_KERNEL_MMAP) {
++ transport_name = chan_param->overwrite ?
++ "relay-overwrite-mmap" : "relay-discard-mmap";
++ } else {
++ return -EINVAL;
++ }
++ break;
++ case METADATA_CHANNEL:
++ if (chan_param->output == LTTNG_KERNEL_SPLICE)
++ transport_name = "relay-metadata";
++ else if (chan_param->output == LTTNG_KERNEL_MMAP)
++ transport_name = "relay-metadata-mmap";
++ else
++ return -EINVAL;
++ break;
++ default:
++ transport_name = "<unknown>";
++ break;
++ }
++ /*
++ * We tolerate no failure path after channel creation. It will stay
++ * invariant for the rest of the session.
++ */
++ chan = lttng_channel_create(session, transport_name, NULL,
++ chan_param->subbuf_size,
++ chan_param->num_subbuf,
++ chan_param->switch_timer_interval,
++ chan_param->read_timer_interval,
++ channel_type);
++ if (!chan) {
++ ret = -EINVAL;
++ goto chan_error;
++ }
++ chan->file = chan_file;
++ chan_file->private_data = chan;
++ fd_install(chan_fd, chan_file);
++ atomic_long_inc(&session_file->f_count);
++
++ return chan_fd;
++
++chan_error:
++ fput(chan_file);
++file_error:
++ put_unused_fd(chan_fd);
++fd_error:
++ return ret;
++}
++
++/**
++ * lttng_session_ioctl - lttng session fd ioctl
++ *
++ * @file: the file
++ * @cmd: the command
++ * @arg: command arg
++ *
++ * This ioctl implements lttng commands:
++ * LTTNG_KERNEL_CHANNEL
++ * Returns a LTTng channel file descriptor
++ * LTTNG_KERNEL_ENABLE
++ * Enables tracing for a session (weak enable)
++ * LTTNG_KERNEL_DISABLE
++ * Disables tracing for a session (strong disable)
++ * LTTNG_KERNEL_METADATA
++ * Returns a LTTng metadata file descriptor
++ *
++ * The returned channel will be deleted when its file descriptor is closed.
++ */
++static
++long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct lttng_session *session = file->private_data;
++
++ switch (cmd) {
++ case LTTNG_KERNEL_OLD_CHANNEL:
++ {
++ struct lttng_kernel_channel chan_param;
++ struct lttng_kernel_old_channel old_chan_param;
++
++ if (copy_from_user(&old_chan_param,
++ (struct lttng_kernel_old_channel __user *) arg,
++ sizeof(struct lttng_kernel_old_channel)))
++ return -EFAULT;
++ chan_param.overwrite = old_chan_param.overwrite;
++ chan_param.subbuf_size = old_chan_param.subbuf_size;
++ chan_param.num_subbuf = old_chan_param.num_subbuf;
++ chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
++ chan_param.read_timer_interval = old_chan_param.read_timer_interval;
++ chan_param.output = old_chan_param.output;
++
++ return lttng_abi_create_channel(file, &chan_param,
++ PER_CPU_CHANNEL);
++ }
++ case LTTNG_KERNEL_CHANNEL:
++ {
++ struct lttng_kernel_channel chan_param;
++
++ if (copy_from_user(&chan_param,
++ (struct lttng_kernel_channel __user *) arg,
++ sizeof(struct lttng_kernel_channel)))
++ return -EFAULT;
++ return lttng_abi_create_channel(file, &chan_param,
++ PER_CPU_CHANNEL);
++ }
++ case LTTNG_KERNEL_OLD_SESSION_START:
++ case LTTNG_KERNEL_OLD_ENABLE:
++ case LTTNG_KERNEL_SESSION_START:
++ case LTTNG_KERNEL_ENABLE:
++ return lttng_session_enable(session);
++ case LTTNG_KERNEL_OLD_SESSION_STOP:
++ case LTTNG_KERNEL_OLD_DISABLE:
++ case LTTNG_KERNEL_SESSION_STOP:
++ case LTTNG_KERNEL_DISABLE:
++ return lttng_session_disable(session);
++ case LTTNG_KERNEL_OLD_METADATA:
++ {
++ struct lttng_kernel_channel chan_param;
++ struct lttng_kernel_old_channel old_chan_param;
++
++ if (copy_from_user(&old_chan_param,
++ (struct lttng_kernel_old_channel __user *) arg,
++ sizeof(struct lttng_kernel_old_channel)))
++ return -EFAULT;
++ chan_param.overwrite = old_chan_param.overwrite;
++ chan_param.subbuf_size = old_chan_param.subbuf_size;
++ chan_param.num_subbuf = old_chan_param.num_subbuf;
++ chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
++ chan_param.read_timer_interval = old_chan_param.read_timer_interval;
++ chan_param.output = old_chan_param.output;
++
++ return lttng_abi_create_channel(file, &chan_param,
++ METADATA_CHANNEL);
++ }
++ case LTTNG_KERNEL_METADATA:
++ {
++ struct lttng_kernel_channel chan_param;
++
++ if (copy_from_user(&chan_param,
++ (struct lttng_kernel_channel __user *) arg,
++ sizeof(struct lttng_kernel_channel)))
++ return -EFAULT;
++ return lttng_abi_create_channel(file, &chan_param,
++ METADATA_CHANNEL);
++ }
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++/*
++ * Called when the last file reference is dropped.
++ *
++ * Big fat note: channels and events are invariant for the whole session after
++ * their creation. So this session destruction also destroys all channel and
++ * event structures specific to this session (they are not destroyed when their
++ * individual file is released).
++ */
++static
++int lttng_session_release(struct inode *inode, struct file *file)
++{
++ struct lttng_session *session = file->private_data;
++
++ if (session)
++ lttng_session_destroy(session);
++ return 0;
++}
++
++static const struct file_operations lttng_session_fops = {
++ .owner = THIS_MODULE,
++ .release = lttng_session_release,
++ .unlocked_ioctl = lttng_session_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = lttng_session_ioctl,
++#endif
++};
++
++/**
++ * lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
++ * @filp: the file
++ * @wait: poll table
++ *
++ * Handles the poll operations for the metadata channels.
++ */
++static
++unsigned int lttng_metadata_ring_buffer_poll(struct file *filp,
++ poll_table *wait)
++{
++ struct lttng_metadata_stream *stream = filp->private_data;
++ struct lib_ring_buffer *buf = stream->priv;
++ int finalized;
++ unsigned int mask = 0;
++
++ if (filp->f_mode & FMODE_READ) {
++ poll_wait_set_exclusive(wait);
++ poll_wait(filp, &stream->read_wait, wait);
++
++ finalized = stream->finalized;
++
++ /*
++	 * lib_ring_buffer_is_finalized() contains a smp_rmb() that orders
++	 * the finalized load before the offsets loads.
++ */
++ WARN_ON(atomic_long_read(&buf->active_readers) != 1);
++
++ if (finalized)
++ mask |= POLLHUP;
++
++ if (stream->metadata_cache->metadata_written >
++ stream->metadata_out)
++ mask |= POLLIN;
++ }
++
++ return mask;
++}
++
++static
++int lttng_metadata_ring_buffer_ioctl_get_next_subbuf(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ struct lttng_metadata_stream *stream = filp->private_data;
++ struct lib_ring_buffer *buf = stream->priv;
++ struct channel *chan = buf->backend.chan;
++ int ret;
++
++ ret = lttng_metadata_output_channel(stream, chan);
++ if (ret > 0) {
++ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
++ ret = 0;
++ }
++ return ret;
++}
++
++static
++void lttng_metadata_ring_buffer_ioctl_put_next_subbuf(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ struct lttng_metadata_stream *stream = filp->private_data;
++
++ stream->metadata_out = stream->metadata_in;
++}
++
++static
++long lttng_metadata_ring_buffer_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ int ret;
++ struct lttng_metadata_stream *stream = filp->private_data;
++ struct lib_ring_buffer *buf = stream->priv;
++
++ switch (cmd) {
++ case RING_BUFFER_GET_NEXT_SUBBUF:
++ {
++ ret = lttng_metadata_ring_buffer_ioctl_get_next_subbuf(filp,
++ cmd, arg);
++ if (ret < 0)
++ goto err;
++ break;
++ }
++ case RING_BUFFER_GET_SUBBUF:
++ {
++ /*
++ * Random access is not allowed for metadata channel.
++ */
++ return -ENOSYS;
++ }
++ default:
++ break;
++ }
++ /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
++
++ /* Performing lib ring buffer ioctl after our own. */
++ ret = lib_ring_buffer_ioctl(filp, cmd, arg, buf);
++ if (ret < 0)
++ goto err;
++
++ switch (cmd) {
++ case RING_BUFFER_PUT_NEXT_SUBBUF:
++ {
++ lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
++ cmd, arg);
++ break;
++ }
++ default:
++ break;
++ }
++err:
++ return ret;
++}
++
++#ifdef CONFIG_COMPAT
++static
++long lttng_metadata_ring_buffer_compat_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ int ret;
++ struct lttng_metadata_stream *stream = filp->private_data;
++ struct lib_ring_buffer *buf = stream->priv;
++
++ switch (cmd) {
++ case RING_BUFFER_GET_NEXT_SUBBUF:
++ {
++ ret = lttng_metadata_ring_buffer_ioctl_get_next_subbuf(filp,
++ cmd, arg);
++ if (ret < 0)
++ goto err;
++ break;
++ }
++ case RING_BUFFER_GET_SUBBUF:
++ {
++ /*
++ * Random access is not allowed for metadata channel.
++ */
++ return -ENOSYS;
++ }
++ default:
++ break;
++ }
++ /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
++
++ /* Performing lib ring buffer ioctl after our own. */
++ ret = lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
++ if (ret < 0)
++ goto err;
++
++ switch (cmd) {
++ case RING_BUFFER_PUT_NEXT_SUBBUF:
++ {
++ lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
++ cmd, arg);
++ break;
++ }
++ default:
++ break;
++ }
++err:
++ return ret;
++}
++#endif
++
++/*
++ * This is not used by anonymous file descriptors. This code is kept
++ * here in case we ever want to implement an inode with an open() operation.
++ */
++static
++int lttng_metadata_ring_buffer_open(struct inode *inode, struct file *file)
++{
++ struct lttng_metadata_stream *stream = inode->i_private;
++ struct lib_ring_buffer *buf = stream->priv;
++
++ file->private_data = buf;
++ /*
++	 * Since the lifetime of the metadata cache differs from that of the
++	 * session, we need to keep our own reference on the transport.
++ */
++ if (!try_module_get(stream->transport->owner)) {
++ printk(KERN_WARNING "LTT : Can't lock transport module.\n");
++ return -EBUSY;
++ }
++ return lib_ring_buffer_open(inode, file, buf);
++}
++
++static
++int lttng_metadata_ring_buffer_release(struct inode *inode, struct file *file)
++{
++ struct lttng_metadata_stream *stream = file->private_data;
++ struct lib_ring_buffer *buf = stream->priv;
++
++ kref_put(&stream->metadata_cache->refcount, metadata_cache_destroy);
++ module_put(stream->transport->owner);
++ return lib_ring_buffer_release(inode, file, buf);
++}
++
++static
++ssize_t lttng_metadata_ring_buffer_splice_read(struct file *in, loff_t *ppos,
++ struct pipe_inode_info *pipe, size_t len,
++ unsigned int flags)
++{
++ struct lttng_metadata_stream *stream = in->private_data;
++ struct lib_ring_buffer *buf = stream->priv;
++
++ return lib_ring_buffer_splice_read(in, ppos, pipe, len,
++ flags, buf);
++}
++
++static
++int lttng_metadata_ring_buffer_mmap(struct file *filp,
++ struct vm_area_struct *vma)
++{
++ struct lttng_metadata_stream *stream = filp->private_data;
++ struct lib_ring_buffer *buf = stream->priv;
++
++ return lib_ring_buffer_mmap(filp, vma, buf);
++}
++
++static
++const struct file_operations lttng_metadata_ring_buffer_file_operations = {
++ .owner = THIS_MODULE,
++ .open = lttng_metadata_ring_buffer_open,
++ .release = lttng_metadata_ring_buffer_release,
++ .poll = lttng_metadata_ring_buffer_poll,
++ .splice_read = lttng_metadata_ring_buffer_splice_read,
++ .mmap = lttng_metadata_ring_buffer_mmap,
++ .unlocked_ioctl = lttng_metadata_ring_buffer_ioctl,
++ .llseek = vfs_lib_ring_buffer_no_llseek,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = lttng_metadata_ring_buffer_compat_ioctl,
++#endif
++};
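For orientation, the metadata stream file installed with these operations is consumed from userspace with a poll()/ioctl loop: POLLIN is raised while metadata_written is ahead of metadata_out, RING_BUFFER_GET_NEXT_SUBBUF first pushes cache content into the ring buffer (lttng_metadata_output_channel) before handing out a sub-buffer, and the PUT_NEXT_SUBBUF hook above advances metadata_out. The following is only a rough sketch: it assumes a stream fd obtained through LTTNG_KERNEL_STREAM on the metadata channel fd, and it assumes the RING_BUFFER_* ioctl definitions from the ring-buffer ABI header (not part of this hunk) are available to userspace.

/*
 * Rough sketch of a userspace metadata consumer loop. 'stream_fd' and the
 * RING_BUFFER_* ioctls are assumptions (see the note above). Illustrative
 * only; a real consumer also splices or mmaps the sub-buffer contents.
 */
#include <poll.h>
#include <sys/ioctl.h>
/* plus the ring-buffer ABI header providing the RING_BUFFER_* ioctls */

static void consume_metadata(int stream_fd)
{
        struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };

        for (;;) {
                if (poll(&pfd, 1, -1) < 0)
                        break;
                if (pfd.revents & POLLIN) {
                        if (!ioctl(stream_fd, RING_BUFFER_GET_NEXT_SUBBUF)) {
                                /* splice() or read the sub-buffer here ... */
                                ioctl(stream_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
                        }
                }
                if (pfd.revents & POLLHUP)
                        break;  /* stream finalized */
        }
}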
++
++static
++int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
++ const struct file_operations *fops)
++{
++ int stream_fd, ret;
++ struct file *stream_file;
++
++ stream_fd = get_unused_fd();
++ if (stream_fd < 0) {
++ ret = stream_fd;
++ goto fd_error;
++ }
++ stream_file = anon_inode_getfile("[lttng_stream]", fops,
++ stream_priv, O_RDWR);
++ if (IS_ERR(stream_file)) {
++ ret = PTR_ERR(stream_file);
++ goto file_error;
++ }
++ /*
++	 * OPEN_FMODE, called within anon_inode_getfile/alloc_file, doesn't honor
++ * FMODE_LSEEK, FMODE_PREAD nor FMODE_PWRITE. We need to read from this
++ * file descriptor, so we set FMODE_PREAD here.
++ */
++ stream_file->f_mode |= FMODE_PREAD;
++ fd_install(stream_fd, stream_file);
++ /*
++ * The stream holds a reference to the channel within the generic ring
++ * buffer library, so no need to hold a refcount on the channel and
++ * session files here.
++ */
++ return stream_fd;
++
++file_error:
++ put_unused_fd(stream_fd);
++fd_error:
++ return ret;
++}
++
++static
++int lttng_abi_open_stream(struct file *channel_file)
++{
++ struct lttng_channel *channel = channel_file->private_data;
++ struct lib_ring_buffer *buf;
++ int ret;
++ void *stream_priv;
++
++ buf = channel->ops->buffer_read_open(channel->chan);
++ if (!buf)
++ return -ENOENT;
++
++ stream_priv = buf;
++ ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
++ &lib_ring_buffer_file_operations);
++ if (ret < 0)
++ goto fd_error;
++
++ return ret;
++
++fd_error:
++ channel->ops->buffer_read_close(buf);
++ return ret;
++}
++
++static
++int lttng_abi_open_metadata_stream(struct file *channel_file)
++{
++ struct lttng_channel *channel = channel_file->private_data;
++ struct lttng_session *session = channel->session;
++ struct lib_ring_buffer *buf;
++ int ret;
++ struct lttng_metadata_stream *metadata_stream;
++ void *stream_priv;
++
++ buf = channel->ops->buffer_read_open(channel->chan);
++ if (!buf)
++ return -ENOENT;
++
++ metadata_stream = kzalloc(sizeof(struct lttng_metadata_stream),
++ GFP_KERNEL);
++ if (!metadata_stream) {
++ ret = -ENOMEM;
++ goto nomem;
++ }
++ metadata_stream->metadata_cache = session->metadata_cache;
++ init_waitqueue_head(&metadata_stream->read_wait);
++ metadata_stream->priv = buf;
++ stream_priv = metadata_stream;
++ metadata_stream->transport = channel->transport;
++
++ /*
++	 * Since the lifetime of the metadata cache differs from that of the
++	 * session, we need to keep our own reference on the transport.
++ */
++ if (!try_module_get(metadata_stream->transport->owner)) {
++ printk(KERN_WARNING "LTT : Can't lock transport module.\n");
++ ret = -EINVAL;
++ goto notransport;
++ }
++
++ ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
++ &lttng_metadata_ring_buffer_file_operations);
++ if (ret < 0)
++ goto fd_error;
++
++ kref_get(&session->metadata_cache->refcount);
++ list_add(&metadata_stream->list,
++ &session->metadata_cache->metadata_stream);
++ return ret;
++
++fd_error:
++ module_put(metadata_stream->transport->owner);
++notransport:
++ kfree(metadata_stream);
++nomem:
++ channel->ops->buffer_read_close(buf);
++ return ret;
++}
++
++static
++int lttng_abi_create_event(struct file *channel_file,
++ struct lttng_kernel_event *event_param)
++{
++ struct lttng_channel *channel = channel_file->private_data;
++ struct lttng_event *event;
++ int event_fd, ret;
++ struct file *event_file;
++
++ event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
++ switch (event_param->instrumentation) {
++ case LTTNG_KERNEL_KRETPROBE:
++ event_param->u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
++ break;
++ case LTTNG_KERNEL_KPROBE:
++ event_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
++ break;
++ case LTTNG_KERNEL_FUNCTION:
++ event_param->u.ftrace.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
++ break;
++ default:
++ break;
++ }
++ switch (event_param->instrumentation) {
++ default:
++ event_fd = get_unused_fd();
++ if (event_fd < 0) {
++ ret = event_fd;
++ goto fd_error;
++ }
++ event_file = anon_inode_getfile("[lttng_event]",
++ &lttng_event_fops,
++ NULL, O_RDWR);
++ if (IS_ERR(event_file)) {
++ ret = PTR_ERR(event_file);
++ goto file_error;
++ }
++ /*
++ * We tolerate no failure path after event creation. It
++ * will stay invariant for the rest of the session.
++ */
++ event = lttng_event_create(channel, event_param, NULL, NULL);
++ if (!event) {
++ ret = -EINVAL;
++ goto event_error;
++ }
++ event_file->private_data = event;
++ fd_install(event_fd, event_file);
++ /* The event holds a reference on the channel */
++ atomic_long_inc(&channel_file->f_count);
++ break;
++ case LTTNG_KERNEL_SYSCALL:
++ /*
++		 * Only all-syscall tracing is supported for now.
++ */
++ if (event_param->name[0] != '\0')
++ return -EINVAL;
++ ret = lttng_syscalls_register(channel, NULL);
++ if (ret)
++ goto fd_error;
++ event_fd = 0;
++ break;
++ }
++ return event_fd;
++
++event_error:
++ fput(event_file);
++file_error:
++ put_unused_fd(event_fd);
++fd_error:
++ return ret;
++}
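As the check above notes, syscall instrumentation currently only supports the catch-all form: the event name must be empty, and the ioctl returns 0 rather than a new event file descriptor. A minimal sketch of the userspace side, assuming a channel fd and the lttng-abi.h definitions that appear later in this patch:

/* Sketch: enable tracing of all system calls on a channel. 'channel_fd' is
 * an assumed fd returned by the LTTNG_KERNEL_CHANNEL ioctl. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "lttng-abi.h"

static int enable_all_syscalls(int channel_fd)
{
        struct lttng_kernel_event ev;

        memset(&ev, 0, sizeof(ev));             /* empty name => all syscalls */
        ev.instrumentation = LTTNG_KERNEL_SYSCALL;
        return ioctl(channel_fd, LTTNG_KERNEL_EVENT, &ev);  /* 0 on success */
}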
++
++/**
++ * lttng_channel_ioctl - lttng syscall through ioctl
++ *
++ * @file: the file
++ * @cmd: the command
++ * @arg: command arg
++ *
++ * This ioctl implements lttng commands:
++ * LTTNG_KERNEL_STREAM
++ * Returns an event stream file descriptor or failure.
++ * (typically, one event stream records events from one CPU)
++ * LTTNG_KERNEL_EVENT
++ * Returns an event file descriptor or failure.
++ * LTTNG_KERNEL_CONTEXT
++ * Prepend a context field to each event in the channel
++ * LTTNG_KERNEL_ENABLE
++ * Enable recording for events in this channel (weak enable)
++ * LTTNG_KERNEL_DISABLE
++ * Disable recording for events in this channel (strong disable)
++ *
++ * Channel and event file descriptors also hold a reference on the session.
++ */
++static
++long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct lttng_channel *channel = file->private_data;
++
++ switch (cmd) {
++ case LTTNG_KERNEL_OLD_STREAM:
++ case LTTNG_KERNEL_STREAM:
++ return lttng_abi_open_stream(file);
++ case LTTNG_KERNEL_OLD_EVENT:
++ {
++ struct lttng_kernel_event *uevent_param;
++ struct lttng_kernel_old_event *old_uevent_param;
++ int ret;
++
++ uevent_param = kmalloc(sizeof(struct lttng_kernel_event),
++ GFP_KERNEL);
++ if (!uevent_param) {
++ ret = -ENOMEM;
++ goto old_event_end;
++ }
++ old_uevent_param = kmalloc(
++ sizeof(struct lttng_kernel_old_event),
++ GFP_KERNEL);
++ if (!old_uevent_param) {
++ ret = -ENOMEM;
++ goto old_event_error_free_param;
++ }
++ if (copy_from_user(old_uevent_param,
++ (struct lttng_kernel_old_event __user *) arg,
++ sizeof(struct lttng_kernel_old_event))) {
++ ret = -EFAULT;
++ goto old_event_error_free_old_param;
++ }
++
++ memcpy(uevent_param->name, old_uevent_param->name,
++ sizeof(uevent_param->name));
++ uevent_param->instrumentation =
++ old_uevent_param->instrumentation;
++
++ switch (old_uevent_param->instrumentation) {
++ case LTTNG_KERNEL_KPROBE:
++ uevent_param->u.kprobe.addr =
++ old_uevent_param->u.kprobe.addr;
++ uevent_param->u.kprobe.offset =
++ old_uevent_param->u.kprobe.offset;
++ memcpy(uevent_param->u.kprobe.symbol_name,
++ old_uevent_param->u.kprobe.symbol_name,
++ sizeof(uevent_param->u.kprobe.symbol_name));
++ break;
++ case LTTNG_KERNEL_KRETPROBE:
++ uevent_param->u.kretprobe.addr =
++ old_uevent_param->u.kretprobe.addr;
++ uevent_param->u.kretprobe.offset =
++ old_uevent_param->u.kretprobe.offset;
++ memcpy(uevent_param->u.kretprobe.symbol_name,
++ old_uevent_param->u.kretprobe.symbol_name,
++ sizeof(uevent_param->u.kretprobe.symbol_name));
++ break;
++ case LTTNG_KERNEL_FUNCTION:
++ memcpy(uevent_param->u.ftrace.symbol_name,
++ old_uevent_param->u.ftrace.symbol_name,
++ sizeof(uevent_param->u.ftrace.symbol_name));
++ break;
++ default:
++ break;
++ }
++ ret = lttng_abi_create_event(file, uevent_param);
++
++old_event_error_free_old_param:
++ kfree(old_uevent_param);
++old_event_error_free_param:
++ kfree(uevent_param);
++old_event_end:
++ return ret;
++ }
++ case LTTNG_KERNEL_EVENT:
++ {
++ struct lttng_kernel_event uevent_param;
++
++ if (copy_from_user(&uevent_param,
++ (struct lttng_kernel_event __user *) arg,
++ sizeof(uevent_param)))
++ return -EFAULT;
++ return lttng_abi_create_event(file, &uevent_param);
++ }
++ case LTTNG_KERNEL_OLD_CONTEXT:
++ {
++ struct lttng_kernel_context *ucontext_param;
++ struct lttng_kernel_old_context *old_ucontext_param;
++ int ret;
++
++ ucontext_param = kmalloc(sizeof(struct lttng_kernel_context),
++ GFP_KERNEL);
++ if (!ucontext_param) {
++ ret = -ENOMEM;
++ goto old_ctx_end;
++ }
++ old_ucontext_param = kmalloc(sizeof(struct lttng_kernel_old_context),
++ GFP_KERNEL);
++ if (!old_ucontext_param) {
++ ret = -ENOMEM;
++ goto old_ctx_error_free_param;
++ }
++
++ if (copy_from_user(old_ucontext_param,
++ (struct lttng_kernel_old_context __user *) arg,
++ sizeof(struct lttng_kernel_old_context))) {
++ ret = -EFAULT;
++ goto old_ctx_error_free_old_param;
++ }
++ ucontext_param->ctx = old_ucontext_param->ctx;
++ memcpy(ucontext_param->padding, old_ucontext_param->padding,
++ sizeof(ucontext_param->padding));
++ /* only type that uses the union */
++ if (old_ucontext_param->ctx == LTTNG_KERNEL_CONTEXT_PERF_COUNTER) {
++ ucontext_param->u.perf_counter.type =
++ old_ucontext_param->u.perf_counter.type;
++ ucontext_param->u.perf_counter.config =
++ old_ucontext_param->u.perf_counter.config;
++ memcpy(ucontext_param->u.perf_counter.name,
++ old_ucontext_param->u.perf_counter.name,
++ sizeof(ucontext_param->u.perf_counter.name));
++ }
++
++ ret = lttng_abi_add_context(file,
++ ucontext_param,
++ &channel->ctx, channel->session);
++
++old_ctx_error_free_old_param:
++ kfree(old_ucontext_param);
++old_ctx_error_free_param:
++ kfree(ucontext_param);
++old_ctx_end:
++ return ret;
++ }
++ case LTTNG_KERNEL_CONTEXT:
++ {
++ struct lttng_kernel_context ucontext_param;
++
++ if (copy_from_user(&ucontext_param,
++ (struct lttng_kernel_context __user *) arg,
++ sizeof(ucontext_param)))
++ return -EFAULT;
++ return lttng_abi_add_context(file,
++ &ucontext_param,
++ &channel->ctx, channel->session);
++ }
++ case LTTNG_KERNEL_OLD_ENABLE:
++ case LTTNG_KERNEL_ENABLE:
++ return lttng_channel_enable(channel);
++ case LTTNG_KERNEL_OLD_DISABLE:
++ case LTTNG_KERNEL_DISABLE:
++ return lttng_channel_disable(channel);
++ default:
++ return -ENOIOCTLCMD;
++ }
++
++}
++
++/**
++ * lttng_metadata_ioctl - lttng syscall through ioctl
++ *
++ * @file: the file
++ * @cmd: the command
++ * @arg: command arg
++ *
++ * This ioctl implements lttng commands:
++ * LTTNG_KERNEL_STREAM
++ * Returns an event stream file descriptor or failure.
++ *
++ * Channel and event file descriptors also hold a reference on the session.
++ */
++static
++long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ switch (cmd) {
++ case LTTNG_KERNEL_OLD_STREAM:
++ case LTTNG_KERNEL_STREAM:
++ return lttng_abi_open_metadata_stream(file);
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++/**
++ * lttng_channel_poll - lttng stream addition/removal monitoring
++ *
++ * @file: the file
++ * @wait: poll table
++ */
++unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
++{
++ struct lttng_channel *channel = file->private_data;
++ unsigned int mask = 0;
++
++ if (file->f_mode & FMODE_READ) {
++ poll_wait_set_exclusive(wait);
++ poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
++ wait);
++
++ if (channel->ops->is_disabled(channel->chan))
++ return POLLERR;
++ if (channel->ops->is_finalized(channel->chan))
++ return POLLHUP;
++ if (channel->ops->buffer_has_read_closed_stream(channel->chan))
++ return POLLIN | POLLRDNORM;
++ return 0;
++ }
++ return mask;
++
++}
++
++static
++int lttng_channel_release(struct inode *inode, struct file *file)
++{
++ struct lttng_channel *channel = file->private_data;
++
++ if (channel)
++ fput(channel->session->file);
++ return 0;
++}
++
++static
++int lttng_metadata_channel_release(struct inode *inode, struct file *file)
++{
++ struct lttng_channel *channel = file->private_data;
++
++ if (channel) {
++ lttng_metadata_channel_destroy(channel);
++ fput(channel->session->file);
++ }
++
++ return 0;
++}
++
++static const struct file_operations lttng_channel_fops = {
++ .owner = THIS_MODULE,
++ .release = lttng_channel_release,
++ .poll = lttng_channel_poll,
++ .unlocked_ioctl = lttng_channel_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = lttng_channel_ioctl,
++#endif
++};
++
++static const struct file_operations lttng_metadata_fops = {
++ .owner = THIS_MODULE,
++ .release = lttng_metadata_channel_release,
++ .unlocked_ioctl = lttng_metadata_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = lttng_metadata_ioctl,
++#endif
++};
++
++/**
++ * lttng_event_ioctl - lttng syscall through ioctl
++ *
++ * @file: the file
++ * @cmd: the command
++ * @arg: command arg
++ *
++ * This ioctl implements lttng commands:
++ * LTTNG_KERNEL_CONTEXT
++ * Prepend a context field to each record of this event
++ * LTTNG_KERNEL_ENABLE
++ * Enable recording for this event (weak enable)
++ * LTTNG_KERNEL_DISABLE
++ * Disable recording for this event (strong disable)
++ */
++static
++long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct lttng_event *event = file->private_data;
++
++ switch (cmd) {
++ case LTTNG_KERNEL_OLD_CONTEXT:
++ {
++ struct lttng_kernel_context *ucontext_param;
++ struct lttng_kernel_old_context *old_ucontext_param;
++ int ret;
++
++ ucontext_param = kmalloc(sizeof(struct lttng_kernel_context),
++ GFP_KERNEL);
++ if (!ucontext_param) {
++ ret = -ENOMEM;
++ goto old_ctx_end;
++ }
++ old_ucontext_param = kmalloc(sizeof(struct lttng_kernel_old_context),
++ GFP_KERNEL);
++ if (!old_ucontext_param) {
++ ret = -ENOMEM;
++ goto old_ctx_error_free_param;
++ }
++
++ if (copy_from_user(old_ucontext_param,
++ (struct lttng_kernel_old_context __user *) arg,
++ sizeof(struct lttng_kernel_old_context))) {
++ ret = -EFAULT;
++ goto old_ctx_error_free_old_param;
++ }
++ ucontext_param->ctx = old_ucontext_param->ctx;
++ memcpy(ucontext_param->padding, old_ucontext_param->padding,
++ sizeof(ucontext_param->padding));
++ /* only type that uses the union */
++ if (old_ucontext_param->ctx == LTTNG_KERNEL_CONTEXT_PERF_COUNTER) {
++ ucontext_param->u.perf_counter.type =
++ old_ucontext_param->u.perf_counter.type;
++ ucontext_param->u.perf_counter.config =
++ old_ucontext_param->u.perf_counter.config;
++ memcpy(ucontext_param->u.perf_counter.name,
++ old_ucontext_param->u.perf_counter.name,
++ sizeof(ucontext_param->u.perf_counter.name));
++ }
++
++ ret = lttng_abi_add_context(file,
++ ucontext_param,
++ &event->ctx, event->chan->session);
++
++old_ctx_error_free_old_param:
++ kfree(old_ucontext_param);
++old_ctx_error_free_param:
++ kfree(ucontext_param);
++old_ctx_end:
++ return ret;
++ }
++ case LTTNG_KERNEL_CONTEXT:
++ {
++ struct lttng_kernel_context ucontext_param;
++
++ if (copy_from_user(&ucontext_param,
++ (struct lttng_kernel_context __user *) arg,
++ sizeof(ucontext_param)))
++ return -EFAULT;
++ return lttng_abi_add_context(file,
++ &ucontext_param,
++ &event->ctx, event->chan->session);
++ }
++ case LTTNG_KERNEL_OLD_ENABLE:
++ case LTTNG_KERNEL_ENABLE:
++ return lttng_event_enable(event);
++ case LTTNG_KERNEL_OLD_DISABLE:
++ case LTTNG_KERNEL_DISABLE:
++ return lttng_event_disable(event);
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++static
++int lttng_event_release(struct inode *inode, struct file *file)
++{
++ struct lttng_event *event = file->private_data;
++
++ if (event)
++ fput(event->chan->file);
++ return 0;
++}
++
++/* TODO: filter control ioctl */
++static const struct file_operations lttng_event_fops = {
++ .owner = THIS_MODULE,
++ .release = lttng_event_release,
++ .unlocked_ioctl = lttng_event_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = lttng_event_ioctl,
++#endif
++};
++
++int __init lttng_abi_init(void)
++{
++ int ret = 0;
++
++ wrapper_vmalloc_sync_all();
++ lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR, NULL,
++ &lttng_fops, NULL);
++
++ if (!lttng_proc_dentry) {
++ printk(KERN_ERR "Error creating LTTng control file\n");
++ ret = -ENOMEM;
++ goto error;
++ }
++error:
++ return ret;
++}
++
++void __exit lttng_abi_exit(void)
++{
++ if (lttng_proc_dentry)
++ remove_proc_entry("lttng", NULL);
++}
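Taken together, lttng-abi.c exposes a small fd-based hierarchy: the /proc/lttng control file hands out session fds, session fds hand out channel and metadata channel fds, and channel fds hand out event and stream fds. The sketch below walks once through that hierarchy from userspace. It is illustrative only: error handling is trimmed, the variable names are ours, real setups drive this through lttng-sessiond rather than raw ioctls, and the definitions come from lttng-abi.h just below.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "lttng-abi.h"

int main(void)
{
        struct lttng_kernel_channel chan_param;
        struct lttng_kernel_event ev_param;
        int root_fd, session_fd, metadata_fd, channel_fd, event_fd;

        root_fd = open("/proc/lttng", O_RDWR);  /* created by lttng_abi_init() */
        session_fd = ioctl(root_fd, LTTNG_KERNEL_SESSION);

        memset(&chan_param, 0, sizeof(chan_param));
        chan_param.subbuf_size = 262144;
        chan_param.num_subbuf = 4;
        chan_param.output = LTTNG_KERNEL_SPLICE;
        chan_param.overwrite = 0;
        metadata_fd = ioctl(session_fd, LTTNG_KERNEL_METADATA, &chan_param);
        channel_fd = ioctl(session_fd, LTTNG_KERNEL_CHANNEL, &chan_param);

        memset(&ev_param, 0, sizeof(ev_param));
        strncpy(ev_param.name, "sched_switch", LTTNG_KERNEL_SYM_NAME_LEN - 1);
        ev_param.instrumentation = LTTNG_KERNEL_TRACEPOINT;
        event_fd = ioctl(channel_fd, LTTNG_KERNEL_EVENT, &ev_param);

        ioctl(session_fd, LTTNG_KERNEL_SESSION_START);
        /* ... per-cpu stream fds come from LTTNG_KERNEL_STREAM on channel_fd,
         * the metadata stream fd from LTTNG_KERNEL_STREAM on metadata_fd ... */
        ioctl(session_fd, LTTNG_KERNEL_SESSION_STOP);

        close(event_fd);
        close(channel_fd);
        close(metadata_fd);
        close(session_fd);
        close(root_fd);
        return 0;
}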
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-abi.h
+@@ -0,0 +1,177 @@
++#ifndef _LTTNG_ABI_H
++#define _LTTNG_ABI_H
++
++/*
++ * lttng-abi.h
++ *
++ * LTTng ABI header
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/fs.h>
++
++#define LTTNG_KERNEL_SYM_NAME_LEN 256
++
++enum lttng_kernel_instrumentation {
++ LTTNG_KERNEL_TRACEPOINT = 0,
++ LTTNG_KERNEL_KPROBE = 1,
++ LTTNG_KERNEL_FUNCTION = 2,
++ LTTNG_KERNEL_KRETPROBE = 3,
++ LTTNG_KERNEL_NOOP = 4, /* not hooked */
++ LTTNG_KERNEL_SYSCALL = 5,
++};
++
++/*
++ * LTTng consumer mode
++ */
++enum lttng_kernel_output {
++ LTTNG_KERNEL_SPLICE = 0,
++ LTTNG_KERNEL_MMAP = 1,
++};
++
++/*
++ * LTTng DebugFS ABI structures.
++ */
++#define LTTNG_KERNEL_CHANNEL_PADDING LTTNG_KERNEL_SYM_NAME_LEN + 32
++struct lttng_kernel_channel {
++ uint64_t subbuf_size; /* in bytes */
++ uint64_t num_subbuf;
++ unsigned int switch_timer_interval; /* usecs */
++ unsigned int read_timer_interval; /* usecs */
++ enum lttng_kernel_output output; /* splice, mmap */
++ int overwrite; /* 1: overwrite, 0: discard */
++ char padding[LTTNG_KERNEL_CHANNEL_PADDING];
++}__attribute__((packed));
++
++struct lttng_kernel_kretprobe {
++ uint64_t addr;
++
++ uint64_t offset;
++ char symbol_name[LTTNG_KERNEL_SYM_NAME_LEN];
++}__attribute__((packed));
++
++/*
++ * Either addr is used, or symbol_name and offset.
++ */
++struct lttng_kernel_kprobe {
++ uint64_t addr;
++
++ uint64_t offset;
++ char symbol_name[LTTNG_KERNEL_SYM_NAME_LEN];
++}__attribute__((packed));
++
++struct lttng_kernel_function_tracer {
++ char symbol_name[LTTNG_KERNEL_SYM_NAME_LEN];
++}__attribute__((packed));
++
++/*
++ * For syscall tracing, name = '\0' means "enable all".
++ */
++#define LTTNG_KERNEL_EVENT_PADDING1 16
++#define LTTNG_KERNEL_EVENT_PADDING2 LTTNG_KERNEL_SYM_NAME_LEN + 32
++struct lttng_kernel_event {
++ char name[LTTNG_KERNEL_SYM_NAME_LEN]; /* event name */
++ enum lttng_kernel_instrumentation instrumentation;
++ char padding[LTTNG_KERNEL_EVENT_PADDING1];
++
++ /* Per instrumentation type configuration */
++ union {
++ struct lttng_kernel_kretprobe kretprobe;
++ struct lttng_kernel_kprobe kprobe;
++ struct lttng_kernel_function_tracer ftrace;
++ char padding[LTTNG_KERNEL_EVENT_PADDING2];
++ } u;
++}__attribute__((packed));
++
++struct lttng_kernel_tracer_version {
++ uint32_t major;
++ uint32_t minor;
++ uint32_t patchlevel;
++}__attribute__((packed));
++
++enum lttng_kernel_calibrate_type {
++ LTTNG_KERNEL_CALIBRATE_KRETPROBE,
++};
++
++struct lttng_kernel_calibrate {
++ enum lttng_kernel_calibrate_type type; /* type (input) */
++}__attribute__((packed));
++
++enum lttng_kernel_context_type {
++ LTTNG_KERNEL_CONTEXT_PID = 0,
++ LTTNG_KERNEL_CONTEXT_PERF_COUNTER = 1,
++ LTTNG_KERNEL_CONTEXT_PROCNAME = 2,
++ LTTNG_KERNEL_CONTEXT_PRIO = 3,
++ LTTNG_KERNEL_CONTEXT_NICE = 4,
++ LTTNG_KERNEL_CONTEXT_VPID = 5,
++ LTTNG_KERNEL_CONTEXT_TID = 6,
++ LTTNG_KERNEL_CONTEXT_VTID = 7,
++ LTTNG_KERNEL_CONTEXT_PPID = 8,
++ LTTNG_KERNEL_CONTEXT_VPPID = 9,
++ LTTNG_KERNEL_CONTEXT_HOSTNAME = 10,
++};
++
++struct lttng_kernel_perf_counter_ctx {
++ uint32_t type;
++ uint64_t config;
++ char name[LTTNG_KERNEL_SYM_NAME_LEN];
++}__attribute__((packed));
++
++#define LTTNG_KERNEL_CONTEXT_PADDING1 16
++#define LTTNG_KERNEL_CONTEXT_PADDING2 LTTNG_KERNEL_SYM_NAME_LEN + 32
++struct lttng_kernel_context {
++ enum lttng_kernel_context_type ctx;
++ char padding[LTTNG_KERNEL_CONTEXT_PADDING1];
++
++ union {
++ struct lttng_kernel_perf_counter_ctx perf_counter;
++ char padding[LTTNG_KERNEL_CONTEXT_PADDING2];
++ } u;
++}__attribute__((packed));
++
++/* LTTng file descriptor ioctl */
++#define LTTNG_KERNEL_SESSION _IO(0xF6, 0x45)
++#define LTTNG_KERNEL_TRACER_VERSION \
++ _IOR(0xF6, 0x46, struct lttng_kernel_tracer_version)
++#define LTTNG_KERNEL_TRACEPOINT_LIST _IO(0xF6, 0x47)
++#define LTTNG_KERNEL_WAIT_QUIESCENT _IO(0xF6, 0x48)
++#define LTTNG_KERNEL_CALIBRATE \
++ _IOWR(0xF6, 0x49, struct lttng_kernel_calibrate)
++
++/* Session FD ioctl */
++#define LTTNG_KERNEL_METADATA \
++ _IOW(0xF6, 0x54, struct lttng_kernel_channel)
++#define LTTNG_KERNEL_CHANNEL \
++ _IOW(0xF6, 0x55, struct lttng_kernel_channel)
++#define LTTNG_KERNEL_SESSION_START _IO(0xF6, 0x56)
++#define LTTNG_KERNEL_SESSION_STOP _IO(0xF6, 0x57)
++
++/* Channel FD ioctl */
++#define LTTNG_KERNEL_STREAM _IO(0xF6, 0x62)
++#define LTTNG_KERNEL_EVENT \
++ _IOW(0xF6, 0x63, struct lttng_kernel_event)
++
++/* Event and Channel FD ioctl */
++#define LTTNG_KERNEL_CONTEXT \
++ _IOW(0xF6, 0x71, struct lttng_kernel_context)
++
++/* Event, Channel and Session ioctl */
++#define LTTNG_KERNEL_ENABLE _IO(0xF6, 0x82)
++#define LTTNG_KERNEL_DISABLE _IO(0xF6, 0x83)
++
++#endif /* _LTTNG_ABI_H */
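The structures above are packed and carry explicit padding, so their sizes are part of the ABI, and the _IO/_IOW/_IOWR command numbers on magic 0xF6 encode those sizes (the LTTNG_KERNEL_OLD_* variants handled in lttng-abi.c come from a separate compatibility header). A small sketch that prints the encoding from userspace, assuming this header can be included there:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "lttng-abi.h"

int main(void)
{
        printf("struct lttng_kernel_channel: %zu bytes\n",
               sizeof(struct lttng_kernel_channel));
        printf("LTTNG_KERNEL_CHANNEL: magic 0x%x, nr 0x%x, size %u\n",
               _IOC_TYPE(LTTNG_KERNEL_CHANNEL),
               _IOC_NR(LTTNG_KERNEL_CHANNEL),
               _IOC_SIZE(LTTNG_KERNEL_CHANNEL));  /* expect 0xf6, 0x55 */
        return 0;
}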
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-calibrate.c
+@@ -0,0 +1,42 @@
++/*
++ * lttng-calibrate.c
++ *
++ * LTTng probe calibration.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include "lttng-abi.h"
++#include "lttng-events.h"
++
++noinline
++void lttng_calibrate_kretprobe(void)
++{
++ asm volatile ("");
++}
++
++int lttng_calibrate(struct lttng_kernel_calibrate *calibrate)
++{
++ switch (calibrate->type) {
++ case LTTNG_KERNEL_CALIBRATE_KRETPROBE:
++ lttng_calibrate_kretprobe();
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
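lttng_calibrate_kretprobe() is a deliberately empty, noinline function: the LTTNG_KERNEL_CALIBRATE ioctl simply calls it, so a kretprobe placed on that symbol can be triggered on demand to estimate probe overhead. A hedged sketch of issuing the ioctl from userspace (error handling trimmed; the lttng-abi.h definitions above are assumed to be usable in userspace):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "lttng-abi.h"

int main(void)
{
        struct lttng_kernel_calibrate calibrate = {
                .type = LTTNG_KERNEL_CALIBRATE_KRETPROBE,
        };
        int fd = open("/proc/lttng", O_RDWR);

        if (fd < 0 || ioctl(fd, LTTNG_KERNEL_CALIBRATE, &calibrate) < 0)
                perror("LTTNG_KERNEL_CALIBRATE");
        if (fd >= 0)
                close(fd);
        return 0;
}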
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-hostname.c
+@@ -0,0 +1,99 @@
++/*
++ * lttng-context-hostname.c
++ *
++ * LTTng hostname context.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/utsname.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "lttng-tracer.h"
++
++#define LTTNG_HOSTNAME_CTX_LEN (__NEW_UTS_LEN + 1)
++
++static
++size_t hostname_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += LTTNG_HOSTNAME_CTX_LEN;
++ return size;
++}
++
++static
++void hostname_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ struct nsproxy *nsproxy;
++ struct uts_namespace *ns;
++ char *hostname;
++
++ /*
++ * No need to take the RCU read-side lock to read current
++ * nsproxy. (documented in nsproxy.h)
++ */
++ nsproxy = current->nsproxy;
++ if (nsproxy) {
++ ns = nsproxy->uts_ns;
++ hostname = ns->name.nodename;
++ chan->ops->event_write(ctx, hostname,
++ LTTNG_HOSTNAME_CTX_LEN);
++ } else {
++ chan->ops->event_memset(ctx, 0,
++ LTTNG_HOSTNAME_CTX_LEN);
++ }
++}
++
++int lttng_add_hostname_to_ctx(struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++
++ field = lttng_append_context(ctx);
++ if (!field)
++ return -ENOMEM;
++ if (lttng_find_context(*ctx, "hostname")) {
++ lttng_remove_context_field(ctx, field);
++ return -EEXIST;
++ }
++ field->event_field.name = "hostname";
++ field->event_field.type.atype = atype_array;
++ field->event_field.type.u.array.elem_type.atype = atype_integer;
++ field->event_field.type.u.array.elem_type.u.basic.integer.size = sizeof(char) * CHAR_BIT;
++ field->event_field.type.u.array.elem_type.u.basic.integer.alignment = lttng_alignof(char) * CHAR_BIT;
++ field->event_field.type.u.array.elem_type.u.basic.integer.signedness = lttng_is_signed_type(char);
++ field->event_field.type.u.array.elem_type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.array.elem_type.u.basic.integer.base = 10;
++ field->event_field.type.u.array.elem_type.u.basic.integer.encoding = lttng_encode_UTF8;
++ field->event_field.type.u.array.length = LTTNG_HOSTNAME_CTX_LEN;
++
++ field->get_size = hostname_get_size;
++ field->record = hostname_record;
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_add_hostname_to_ctx);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit Hostname Context");
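The hostname context records a fixed-size (__NEW_UTS_LEN + 1 bytes) UTF-8 array taken from the current UTS namespace. A minimal sketch of attaching it from userspace, assuming 'channel_fd' came from the LTTNG_KERNEL_CHANNEL ioctl and lttng-abi.h is available:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "lttng-abi.h"

/* Sketch: record the hostname with every event of a channel. */
static int add_hostname_context(int channel_fd)
{
        struct lttng_kernel_context ctx_param;

        memset(&ctx_param, 0, sizeof(ctx_param));
        ctx_param.ctx = LTTNG_KERNEL_CONTEXT_HOSTNAME;
        return ioctl(channel_fd, LTTNG_KERNEL_CONTEXT, &ctx_param);
}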
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-nice.c
+@@ -0,0 +1,81 @@
++/*
++ * lttng-context-nice.c
++ *
++ * LTTng nice context.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "lttng-tracer.h"
++
++static
++size_t nice_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += lib_ring_buffer_align(offset, lttng_alignof(int));
++ size += sizeof(int);
++ return size;
++}
++
++static
++void nice_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ int nice;
++
++ nice = task_nice(current);
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(nice));
++ chan->ops->event_write(ctx, &nice, sizeof(nice));
++}
++
++int lttng_add_nice_to_ctx(struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++
++ field = lttng_append_context(ctx);
++ if (!field)
++ return -ENOMEM;
++ if (lttng_find_context(*ctx, "nice")) {
++ lttng_remove_context_field(ctx, field);
++ return -EEXIST;
++ }
++ field->event_field.name = "nice";
++ field->event_field.type.atype = atype_integer;
++ field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.alignment = lttng_alignof(int) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(int);
++ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.basic.integer.base = 10;
++ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
++ field->get_size = nice_get_size;
++ field->record = nice_record;
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_add_nice_to_ctx);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit Nice Context");
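All of the simple integer contexts share this shape: a get_size() callback that returns alignment padding plus payload size, and a record() callback that aligns the ring-buffer context and writes the value. Purely as an illustration of that template (this is not part of the patch, and the registration function, field description and module boilerplate are omitted), a hypothetical cpu_id context could look like:

/* Hypothetical "cpu_id" context, illustrating the get_size()/record()
 * template used by lttng-context-nice.c above. Not part of this patch. */
static
size_t cpu_id_get_size(size_t offset)
{
        size_t size = 0;

        size += lib_ring_buffer_align(offset, lttng_alignof(int));
        size += sizeof(int);
        return size;
}

static
void cpu_id_record(struct lttng_ctx_field *field,
                struct lib_ring_buffer_ctx *ctx,
                struct lttng_channel *chan)
{
        int cpu;

        /* Ring-buffer write paths run with preemption disabled. */
        cpu = smp_processor_id();
        lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu));
        chan->ops->event_write(ctx, &cpu, sizeof(cpu));
}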
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-perf-counters.c
+@@ -0,0 +1,285 @@
++/*
++ * lttng-context-perf-counters.c
++ *
++ * LTTng performance monitoring counters (perf-counters) integration module.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/perf_event.h>
++#include <linux/list.h>
++#include <linux/string.h>
++#include <linux/cpu.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "wrapper/perf.h"
++#include "lttng-tracer.h"
++
++static
++size_t perf_counter_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
++ size += sizeof(uint64_t);
++ return size;
++}
++
++static
++void perf_counter_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ struct perf_event *event;
++ uint64_t value;
++
++ event = field->u.perf_counter->e[ctx->cpu];
++ if (likely(event)) {
++ if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
++ value = 0;
++ } else {
++ event->pmu->read(event);
++ value = local64_read(&event->count);
++ }
++ } else {
++ /*
++ * Perf chooses not to be clever and not to support enabling a
++ * perf counter before the cpu is brought up. Therefore, we need
++ * to support having events coming (e.g. scheduler events)
++		 * before the counter is set up. Write an arbitrary 0 in this
++ * case.
++ */
++ value = 0;
++ }
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
++ chan->ops->event_write(ctx, &value, sizeof(value));
++}
++
++#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
++static
++void overflow_callback(struct perf_event *event,
++ struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++}
++#else
++static
++void overflow_callback(struct perf_event *event, int nmi,
++ struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++}
++#endif
++
++static
++void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
++{
++ struct perf_event **events = field->u.perf_counter->e;
++ int cpu;
++
++ get_online_cpus();
++ for_each_online_cpu(cpu)
++ perf_event_release_kernel(events[cpu]);
++ put_online_cpus();
++#ifdef CONFIG_HOTPLUG_CPU
++ unregister_cpu_notifier(&field->u.perf_counter->nb);
++#endif
++ kfree(field->event_field.name);
++ kfree(field->u.perf_counter->attr);
++ kfree(events);
++ kfree(field->u.perf_counter);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/**
++ * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
++ * @nb: notifier block
++ * @action: hotplug action to take
++ * @hcpu: CPU number
++ *
++ * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
++ *
++ * We can set up perf counters when the CPU is online (up prepare seems to
++ * be too soon).
++ */
++static
++int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
++ unsigned long action,
++ void *hcpu)
++{
++ unsigned int cpu = (unsigned long) hcpu;
++ struct lttng_perf_counter_field *perf_field =
++ container_of(nb, struct lttng_perf_counter_field, nb);
++ struct perf_event **events = perf_field->e;
++ struct perf_event_attr *attr = perf_field->attr;
++ struct perf_event *pevent;
++
++ if (!perf_field->hp_enable)
++ return NOTIFY_OK;
++
++ switch (action) {
++ case CPU_ONLINE:
++ case CPU_ONLINE_FROZEN:
++ pevent = wrapper_perf_event_create_kernel_counter(attr,
++ cpu, NULL, overflow_callback);
++ if (!pevent || IS_ERR(pevent))
++ return NOTIFY_BAD;
++ if (pevent->state == PERF_EVENT_STATE_ERROR) {
++ perf_event_release_kernel(pevent);
++ return NOTIFY_BAD;
++ }
++ barrier(); /* Create perf counter before setting event */
++ events[cpu] = pevent;
++ break;
++ case CPU_UP_CANCELED:
++ case CPU_UP_CANCELED_FROZEN:
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ pevent = events[cpu];
++ events[cpu] = NULL;
++ barrier(); /* NULLify event before perf counter teardown */
++ perf_event_release_kernel(pevent);
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++#endif
++
++int lttng_add_perf_counter_to_ctx(uint32_t type,
++ uint64_t config,
++ const char *name,
++ struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++ struct lttng_perf_counter_field *perf_field;
++ struct perf_event **events;
++ struct perf_event_attr *attr;
++ int ret;
++ int cpu;
++ char *name_alloc;
++
++ events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
++ if (!events)
++ return -ENOMEM;
++
++ attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
++ if (!attr) {
++ ret = -ENOMEM;
++ goto error_attr;
++ }
++
++ attr->type = type;
++ attr->config = config;
++ attr->size = sizeof(struct perf_event_attr);
++ attr->pinned = 1;
++ attr->disabled = 0;
++
++ perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
++ if (!perf_field) {
++ ret = -ENOMEM;
++ goto error_alloc_perf_field;
++ }
++ perf_field->e = events;
++ perf_field->attr = attr;
++
++ name_alloc = kstrdup(name, GFP_KERNEL);
++ if (!name_alloc) {
++ ret = -ENOMEM;
++ goto name_alloc_error;
++ }
++
++ field = lttng_append_context(ctx);
++ if (!field) {
++ ret = -ENOMEM;
++ goto append_context_error;
++ }
++ if (lttng_find_context(*ctx, name_alloc)) {
++ ret = -EEXIST;
++ goto find_error;
++ }
++
++#ifdef CONFIG_HOTPLUG_CPU
++ perf_field->nb.notifier_call =
++ lttng_perf_counter_cpu_hp_callback;
++ perf_field->nb.priority = 0;
++ register_cpu_notifier(&perf_field->nb);
++#endif
++
++ get_online_cpus();
++ for_each_online_cpu(cpu) {
++ events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
++ cpu, NULL, overflow_callback);
++ if (!events[cpu] || IS_ERR(events[cpu])) {
++ ret = -EINVAL;
++ goto counter_error;
++ }
++ if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
++ ret = -EBUSY;
++ goto counter_busy;
++ }
++ }
++ put_online_cpus();
++
++ field->destroy = lttng_destroy_perf_counter_field;
++
++ field->event_field.name = name_alloc;
++ field->event_field.type.atype = atype_integer;
++ field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(uint64_t);
++ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.basic.integer.base = 10;
++ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
++ field->get_size = perf_counter_get_size;
++ field->record = perf_counter_record;
++ field->u.perf_counter = perf_field;
++ perf_field->hp_enable = 1;
++
++ wrapper_vmalloc_sync_all();
++ return 0;
++
++counter_busy:
++counter_error:
++ for_each_online_cpu(cpu) {
++ if (events[cpu] && !IS_ERR(events[cpu]))
++ perf_event_release_kernel(events[cpu]);
++ }
++ put_online_cpus();
++#ifdef CONFIG_HOTPLUG_CPU
++ unregister_cpu_notifier(&perf_field->nb);
++#endif
++find_error:
++ lttng_remove_context_field(ctx, field);
++append_context_error:
++ kfree(name_alloc);
++name_alloc_error:
++ kfree(perf_field);
++error_alloc_perf_field:
++ kfree(attr);
++error_attr:
++ kfree(events);
++ return ret;
++}
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");
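From userspace, a perf counter context is requested with a type/config pair in the same format as perf_event_attr; the module then creates one pinned kernel counter per online CPU and keeps that array current through the hotplug notifier above. A sketch of the request side, assuming 'channel_fd' from the LTTNG_KERNEL_CHANNEL ioctl and the constants from <linux/perf_event.h>:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include "lttng-abi.h"

/* Sketch: attach a hardware cycle counter as an event context. */
static int add_cycles_context(int channel_fd)
{
        struct lttng_kernel_context ctx_param;

        memset(&ctx_param, 0, sizeof(ctx_param));
        ctx_param.ctx = LTTNG_KERNEL_CONTEXT_PERF_COUNTER;
        ctx_param.u.perf_counter.type = PERF_TYPE_HARDWARE;
        ctx_param.u.perf_counter.config = PERF_COUNT_HW_CPU_CYCLES;
        strncpy(ctx_param.u.perf_counter.name, "perf_cpu_cycles",
                LTTNG_KERNEL_SYM_NAME_LEN - 1);
        return ioctl(channel_fd, LTTNG_KERNEL_CONTEXT, &ctx_param);
}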
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-pid.c
+@@ -0,0 +1,81 @@
++/*
++ * lttng-context-pid.c
++ *
++ * LTTng PID context.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "lttng-tracer.h"
++
++static
++size_t pid_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
++ size += sizeof(pid_t);
++ return size;
++}
++
++static
++void pid_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ pid_t pid;
++
++ pid = task_tgid_nr(current);
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(pid));
++ chan->ops->event_write(ctx, &pid, sizeof(pid));
++}
++
++int lttng_add_pid_to_ctx(struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++
++ field = lttng_append_context(ctx);
++ if (!field)
++ return -ENOMEM;
++ if (lttng_find_context(*ctx, "pid")) {
++ lttng_remove_context_field(ctx, field);
++ return -EEXIST;
++ }
++ field->event_field.name = "pid";
++ field->event_field.type.atype = atype_integer;
++ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(pid_t);
++ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.basic.integer.base = 10;
++ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
++ field->get_size = pid_get_size;
++ field->record = pid_record;
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_add_pid_to_ctx);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit PID Context");
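Note that pid_get_size() does not return a constant: it returns the padding needed to align the field at the current offset plus sizeof(pid_t), which is what lets differently aligned context fields be chained in a single event header. A small userspace model of that arithmetic (align_padding() is our illustrative stand-in for lib_ring_buffer_align()):

#include <stddef.h>
#include <stdio.h>

/* Bytes of padding needed so offset becomes a multiple of alignment
 * (alignment must be a power of two, as in the kernel helper). */
static size_t align_padding(size_t offset, size_t alignment)
{
        return (-offset) & (alignment - 1);
}

int main(void)
{
        size_t offset = 5;      /* pretend prior fields end at byte 5 */
        size_t pad = align_padding(offset, sizeof(int));  /* pid_t is an int */

        printf("pid context adds %zu bytes (%zu padding + %zu payload)\n",
               pad + sizeof(int), pad, sizeof(int));    /* prints 7 = 3 + 4 */
        return 0;
}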
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-ppid.c
+@@ -0,0 +1,93 @@
++/*
++ * lttng-context-ppid.c
++ *
++ * LTTng PPID context.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/syscalls.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "lttng-tracer.h"
++
++static
++size_t ppid_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
++ size += sizeof(pid_t);
++ return size;
++}
++
++static
++void ppid_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ pid_t ppid;
++
++
++ /*
++ * TODO: when we eventually add RCU subsystem instrumentation,
++ * taking the rcu read lock here will trigger RCU tracing
++ * recursively. We should modify the kernel synchronization so
++ * it synchronizes both for RCU and RCU sched, and rely on
++ * rcu_read_lock_sched_notrace.
++ */
++
++ rcu_read_lock();
++ ppid = task_tgid_nr(current->real_parent);
++ rcu_read_unlock();
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(ppid));
++ chan->ops->event_write(ctx, &ppid, sizeof(ppid));
++}
++
++int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++
++ field = lttng_append_context(ctx);
++ if (!field)
++ return -ENOMEM;
++ if (lttng_find_context(*ctx, "ppid")) {
++ lttng_remove_context_field(ctx, field);
++ return -EEXIST;
++ }
++ field->event_field.name = "ppid";
++ field->event_field.type.atype = atype_integer;
++ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(pid_t);
++ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.basic.integer.base = 10;
++ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
++ field->get_size = ppid_get_size;
++ field->record = ppid_record;
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_add_ppid_to_ctx);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit PPID Context");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-prio.c
+@@ -0,0 +1,102 @@
++/*
++ * lttng-context-prio.c
++ *
++ * LTTng priority context.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "wrapper/kallsyms.h"
++#include "lttng-tracer.h"
++
++static
++int (*wrapper_task_prio_sym)(struct task_struct *t);
++
++int wrapper_task_prio_init(void)
++{
++ wrapper_task_prio_sym = (void *) kallsyms_lookup_funcptr("task_prio");
++ if (!wrapper_task_prio_sym) {
++ printk(KERN_WARNING "LTTng: task_prio symbol lookup failed.\n");
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static
++size_t prio_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += lib_ring_buffer_align(offset, lttng_alignof(int));
++ size += sizeof(int);
++ return size;
++}
++
++static
++void prio_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ int prio;
++
++ prio = wrapper_task_prio_sym(current);
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(prio));
++ chan->ops->event_write(ctx, &prio, sizeof(prio));
++}
++
++int lttng_add_prio_to_ctx(struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++ int ret;
++
++ if (!wrapper_task_prio_sym) {
++ ret = wrapper_task_prio_init();
++ if (ret)
++ return ret;
++ }
++
++ field = lttng_append_context(ctx);
++ if (!field)
++ return -ENOMEM;
++ if (lttng_find_context(*ctx, "prio")) {
++ lttng_remove_context_field(ctx, field);
++ return -EEXIST;
++ }
++ field->event_field.name = "prio";
++ field->event_field.type.atype = atype_integer;
++ field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.alignment = lttng_alignof(int) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(int);
++ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.basic.integer.base = 10;
++ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
++ field->get_size = prio_get_size;
++ field->record = prio_record;
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_add_prio_to_ctx);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit Priority Context");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-procname.c
+@@ -0,0 +1,85 @@
++/*
++ * lttng-context-procname.c
++ *
++ * LTTng procname context.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "lttng-tracer.h"
++
++static
++size_t procname_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += sizeof(current->comm);
++ return size;
++}
++
++/*
++ * Racy read of procname. We simply copy its whole array size.
++ * Races with /proc/<task>/comm write only.
++ * Otherwise having to take a mutex for each event is cumbersome and
++ * could lead to crash in IRQ context and deadlock of the lockdep tracer.
++ */
++static
++void procname_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ chan->ops->event_write(ctx, current->comm, sizeof(current->comm));
++}
++
++int lttng_add_procname_to_ctx(struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++
++ field = lttng_append_context(ctx);
++ if (!field)
++ return -ENOMEM;
++ if (lttng_find_context(*ctx, "procname")) {
++ lttng_remove_context_field(ctx, field);
++ return -EEXIST;
++ }
++ field->event_field.name = "procname";
++ field->event_field.type.atype = atype_array;
++ field->event_field.type.u.array.elem_type.atype = atype_integer;
++ field->event_field.type.u.array.elem_type.u.basic.integer.size = sizeof(char) * CHAR_BIT;
++ field->event_field.type.u.array.elem_type.u.basic.integer.alignment = lttng_alignof(char) * CHAR_BIT;
++ field->event_field.type.u.array.elem_type.u.basic.integer.signedness = lttng_is_signed_type(char);
++ field->event_field.type.u.array.elem_type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.array.elem_type.u.basic.integer.base = 10;
++ field->event_field.type.u.array.elem_type.u.basic.integer.encoding = lttng_encode_UTF8;
++ field->event_field.type.u.array.length = sizeof(current->comm);
++
++ field->get_size = procname_get_size;
++ field->record = procname_record;
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_add_procname_to_ctx);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-tid.c
+@@ -0,0 +1,81 @@
++/*
++ * lttng-context-tid.c
++ *
++ * LTTng TID context.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "lttng-tracer.h"
++
++static
++size_t tid_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
++ size += sizeof(pid_t);
++ return size;
++}
++
++static
++void tid_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ pid_t tid;
++
++ tid = task_pid_nr(current);
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(tid));
++ chan->ops->event_write(ctx, &tid, sizeof(tid));
++}
++
++int lttng_add_tid_to_ctx(struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++
++ field = lttng_append_context(ctx);
++ if (!field)
++ return -ENOMEM;
++ if (lttng_find_context(*ctx, "tid")) {
++ lttng_remove_context_field(ctx, field);
++ return -EEXIST;
++ }
++ field->event_field.name = "tid";
++ field->event_field.type.atype = atype_integer;
++ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(pid_t);
++ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.basic.integer.base = 10;
++ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
++ field->get_size = tid_get_size;
++ field->record = tid_record;
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_add_tid_to_ctx);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit TID Context");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-vpid.c
+@@ -0,0 +1,87 @@
++/*
++ * lttng-context-vpid.c
++ *
++ * LTTng vPID context.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "lttng-tracer.h"
++
++static
++size_t vpid_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
++ size += sizeof(pid_t);
++ return size;
++}
++
++static
++void vpid_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ pid_t vpid;
++
++ /*
++ * nsproxy can be NULL when scheduled out of exit.
++ */
++ if (!current->nsproxy)
++ vpid = 0;
++ else
++ vpid = task_tgid_vnr(current);
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(vpid));
++ chan->ops->event_write(ctx, &vpid, sizeof(vpid));
++}
++
++int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++
++ field = lttng_append_context(ctx);
++ if (!field)
++ return -ENOMEM;
++ if (lttng_find_context(*ctx, "vpid")) {
++ lttng_remove_context_field(ctx, field);
++ return -EEXIST;
++ }
++ field->event_field.name = "vpid";
++ field->event_field.type.atype = atype_integer;
++ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(pid_t);
++ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.basic.integer.base = 10;
++ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
++ field->get_size = vpid_get_size;
++ field->record = vpid_record;
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_add_vpid_to_ctx);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit vPID Context");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-vppid.c
+@@ -0,0 +1,102 @@
++/*
++ * lttng-context-vppid.c
++ *
++ * LTTng vPPID context.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/syscalls.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "lttng-tracer.h"
++
++static
++size_t vppid_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
++ size += sizeof(pid_t);
++ return size;
++}
++
++static
++void vppid_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ struct task_struct *parent;
++ pid_t vppid;
++
++ /*
++ * current nsproxy can be NULL when scheduled out of exit. pid_vnr uses
++ * the current thread nsproxy to perform the lookup.
++ */
++
++ /*
++ * TODO: when we eventually add RCU subsystem instrumentation,
++ * taking the rcu read lock here will trigger RCU tracing
++ * recursively. We should modify the kernel synchronization so
++ * it synchronizes both for RCU and RCU sched, and rely on
++ * rcu_read_lock_sched_notrace.
++ */
++
++ rcu_read_lock();
++ parent = rcu_dereference(current->real_parent);
++ if (!current->nsproxy)
++ vppid = 0;
++ else
++ vppid = task_tgid_vnr(parent);
++ rcu_read_unlock();
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(vppid));
++ chan->ops->event_write(ctx, &vppid, sizeof(vppid));
++}
++
++int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++
++ field = lttng_append_context(ctx);
++ if (!field)
++ return -ENOMEM;
++ if (lttng_find_context(*ctx, "vppid")) {
++ lttng_remove_context_field(ctx, field);
++ return -EEXIST;
++ }
++ field->event_field.name = "vppid";
++ field->event_field.type.atype = atype_integer;
++ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(pid_t);
++ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.basic.integer.base = 10;
++ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
++ field->get_size = vppid_get_size;
++ field->record = vppid_record;
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_add_vppid_to_ctx);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit vPPID Context");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context-vtid.c
+@@ -0,0 +1,87 @@
++/*
++ * lttng-context-vtid.c
++ *
++ * LTTng vTID context.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include "lttng-events.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++#include "wrapper/vmalloc.h"
++#include "lttng-tracer.h"
++
++static
++size_t vtid_get_size(size_t offset)
++{
++ size_t size = 0;
++
++ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
++ size += sizeof(pid_t);
++ return size;
++}
++
++static
++void vtid_record(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan)
++{
++ pid_t vtid;
++
++ /*
++ * nsproxy can be NULL when scheduled out of exit.
++ */
++ if (!current->nsproxy)
++ vtid = 0;
++ else
++ vtid = task_pid_vnr(current);
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(vtid));
++ chan->ops->event_write(ctx, &vtid, sizeof(vtid));
++}
++
++int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx)
++{
++ struct lttng_ctx_field *field;
++
++ field = lttng_append_context(ctx);
++ if (!field)
++ return -ENOMEM;
++ if (lttng_find_context(*ctx, "vtid")) {
++ lttng_remove_context_field(ctx, field);
++ return -EEXIST;
++ }
++ field->event_field.name = "vtid";
++ field->event_field.type.atype = atype_integer;
++ field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
++ field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(pid_t);
++ field->event_field.type.u.basic.integer.reverse_byte_order = 0;
++ field->event_field.type.u.basic.integer.base = 10;
++ field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
++ field->get_size = vtid_get_size;
++ field->record = vtid_record;
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_add_vtid_to_ctx);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit vTID Context");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-context.c
+@@ -0,0 +1,105 @@
++/*
++ * lttng-context.c
++ *
++ * LTTng trace/channel/event context management.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/slab.h>
++#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
++#include "lttng-events.h"
++#include "lttng-tracer.h"
++
++int lttng_find_context(struct lttng_ctx *ctx, const char *name)
++{
++ unsigned int i;
++
++ for (i = 0; i < ctx->nr_fields; i++) {
++ /* Skip allocated (but non-initialized) contexts */
++ if (!ctx->fields[i].event_field.name)
++ continue;
++ if (!strcmp(ctx->fields[i].event_field.name, name))
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(lttng_find_context);
++
++/*
++ * Note: as we append context information, the pointer location may change.
++ */
++struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
++{
++ struct lttng_ctx_field *field;
++ struct lttng_ctx *ctx;
++
++ if (!*ctx_p) {
++ *ctx_p = kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
++ if (!*ctx_p)
++ return NULL;
++ }
++ ctx = *ctx_p;
++ if (ctx->nr_fields + 1 > ctx->allocated_fields) {
++ struct lttng_ctx_field *new_fields;
++
++ ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
++ new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
++ if (!new_fields)
++ return NULL;
++ if (ctx->fields)
++ memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
++ kfree(ctx->fields);
++ ctx->fields = new_fields;
++ }
++ field = &ctx->fields[ctx->nr_fields];
++ ctx->nr_fields++;
++ return field;
++}
++EXPORT_SYMBOL_GPL(lttng_append_context);
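++
++/*
++ * Usage note: because the fields array may be reallocated by a later
++ * append, a field pointer returned by lttng_append_context() is only
++ * safe to use until the next call on the same context. The
++ * lttng_add_*_to_ctx() helpers in the lttng-context-*.c files follow
++ * this rule by filling the returned field right away.
++ */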
++
++/*
++ * Remove last context field.
++ */
++void lttng_remove_context_field(struct lttng_ctx **ctx_p,
++ struct lttng_ctx_field *field)
++{
++ struct lttng_ctx *ctx;
++
++ ctx = *ctx_p;
++ ctx->nr_fields--;
++ WARN_ON_ONCE(&ctx->fields[ctx->nr_fields] != field);
++ memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
++}
++EXPORT_SYMBOL_GPL(lttng_remove_context_field);
++
++void lttng_destroy_context(struct lttng_ctx *ctx)
++{
++ int i;
++
++ if (!ctx)
++ return;
++ for (i = 0; i < ctx->nr_fields; i++) {
++ if (ctx->fields[i].destroy)
++ ctx->fields[i].destroy(&ctx->fields[i]);
++ }
++ kfree(ctx->fields);
++ kfree(ctx);
++}
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-endian.h
+@@ -0,0 +1,43 @@
++#ifndef _LTTNG_ENDIAN_H
++#define _LTTNG_ENDIAN_H
++
++/*
++ * lttng-endian.h
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifdef __KERNEL__
++# include <asm/byteorder.h>
++# ifdef __BIG_ENDIAN
++# define __BYTE_ORDER __BIG_ENDIAN
++# elif defined(__LITTLE_ENDIAN)
++# define __BYTE_ORDER __LITTLE_ENDIAN
++# else
++# error "unknown endianness"
++# endif
++#ifndef __BIG_ENDIAN
++# define __BIG_ENDIAN 4321
++#endif
++#ifndef __LITTLE_ENDIAN
++# define __LITTLE_ENDIAN 1234
++#endif
++#else
++# include <endian.h>
++#endif
++
++#endif /* _LTTNG_ENDIAN_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-events.c
+@@ -0,0 +1,1260 @@
++/*
++ * lttng-events.c
++ *
++ * Holds LTTng per-session event registry.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/jiffies.h>
++#include <linux/utsname.h>
++#include "wrapper/uuid.h"
++#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
++#include "wrapper/random.h"
++#include "wrapper/tracepoint.h"
++#include "lttng-kernel-version.h"
++#include "lttng-events.h"
++#include "lttng-tracer.h"
++#include "lttng-abi-old.h"
++
++#define METADATA_CACHE_DEFAULT_SIZE 4096
++
++static LIST_HEAD(sessions);
++static LIST_HEAD(lttng_transport_list);
++/*
++ * Protect the sessions and metadata caches.
++ */
++static DEFINE_MUTEX(sessions_mutex);
++static struct kmem_cache *event_cache;
++
++static void _lttng_event_destroy(struct lttng_event *event);
++static void _lttng_channel_destroy(struct lttng_channel *chan);
++static int _lttng_event_unregister(struct lttng_event *event);
++static
++int _lttng_event_metadata_statedump(struct lttng_session *session,
++ struct lttng_channel *chan,
++ struct lttng_event *event);
++static
++int _lttng_session_metadata_statedump(struct lttng_session *session);
++static
++void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
++
++void synchronize_trace(void)
++{
++ synchronize_sched();
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++#ifdef CONFIG_PREEMPT_RT_FULL
++ synchronize_rcu();
++#endif
++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
++#ifdef CONFIG_PREEMPT_RT
++ synchronize_rcu();
++#endif
++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
++}
++
++struct lttng_session *lttng_session_create(void)
++{
++ struct lttng_session *session;
++ struct lttng_metadata_cache *metadata_cache;
++
++ mutex_lock(&sessions_mutex);
++ session = kzalloc(sizeof(struct lttng_session), GFP_KERNEL);
++ if (!session)
++ goto err;
++ INIT_LIST_HEAD(&session->chan);
++ INIT_LIST_HEAD(&session->events);
++ uuid_le_gen(&session->uuid);
++
++ metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
++ GFP_KERNEL);
++ if (!metadata_cache)
++ goto err_free_session;
++ metadata_cache->data = kzalloc(METADATA_CACHE_DEFAULT_SIZE,
++ GFP_KERNEL);
++ if (!metadata_cache->data)
++ goto err_free_cache;
++ metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
++ kref_init(&metadata_cache->refcount);
++ session->metadata_cache = metadata_cache;
++ INIT_LIST_HEAD(&metadata_cache->metadata_stream);
++ list_add(&session->list, &sessions);
++ mutex_unlock(&sessions_mutex);
++ return session;
++
++err_free_cache:
++ kfree(metadata_cache);
++err_free_session:
++ kfree(session);
++err:
++ mutex_unlock(&sessions_mutex);
++ return NULL;
++}
++
++void metadata_cache_destroy(struct kref *kref)
++{
++ struct lttng_metadata_cache *cache =
++ container_of(kref, struct lttng_metadata_cache, refcount);
++ kfree(cache->data);
++ kfree(cache);
++}
++
++void lttng_session_destroy(struct lttng_session *session)
++{
++ struct lttng_channel *chan, *tmpchan;
++ struct lttng_event *event, *tmpevent;
++ struct lttng_metadata_stream *metadata_stream;
++ int ret;
++
++ mutex_lock(&sessions_mutex);
++ ACCESS_ONCE(session->active) = 0;
++ list_for_each_entry(chan, &session->chan, list) {
++ ret = lttng_syscalls_unregister(chan);
++ WARN_ON(ret);
++ }
++ list_for_each_entry(event, &session->events, list) {
++ ret = _lttng_event_unregister(event);
++ WARN_ON(ret);
++ }
++ synchronize_trace(); /* Wait for in-flight events to complete */
++ list_for_each_entry_safe(event, tmpevent, &session->events, list)
++ _lttng_event_destroy(event);
++ list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
++ BUG_ON(chan->channel_type == METADATA_CHANNEL);
++ _lttng_channel_destroy(chan);
++ }
++ list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
++ _lttng_metadata_channel_hangup(metadata_stream);
++ kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
++ list_del(&session->list);
++ mutex_unlock(&sessions_mutex);
++ kfree(session);
++}
++
++int lttng_session_enable(struct lttng_session *session)
++{
++ int ret = 0;
++ struct lttng_channel *chan;
++
++ mutex_lock(&sessions_mutex);
++ if (session->active) {
++ ret = -EBUSY;
++ goto end;
++ }
++
++ /*
++ * Snapshot the number of events per channel to know the type of header
++ * we need to use.
++ */
++ list_for_each_entry(chan, &session->chan, list) {
++ if (chan->header_type)
++ continue; /* don't change it if session stop/restart */
++ if (chan->free_event_id < 31)
++ chan->header_type = 1; /* compact */
++ else
++ chan->header_type = 2; /* large */
++ }
++
++ ACCESS_ONCE(session->active) = 1;
++ ACCESS_ONCE(session->been_active) = 1;
++ ret = _lttng_session_metadata_statedump(session);
++ if (ret) {
++ ACCESS_ONCE(session->active) = 0;
++ goto end;
++ }
++ ret = lttng_statedump_start(session);
++ if (ret)
++ ACCESS_ONCE(session->active) = 0;
++end:
++ mutex_unlock(&sessions_mutex);
++ return ret;
++}
++
++int lttng_session_disable(struct lttng_session *session)
++{
++ int ret = 0;
++
++ mutex_lock(&sessions_mutex);
++ if (!session->active) {
++ ret = -EBUSY;
++ goto end;
++ }
++ ACCESS_ONCE(session->active) = 0;
++end:
++ mutex_unlock(&sessions_mutex);
++ return ret;
++}
++
++int lttng_channel_enable(struct lttng_channel *channel)
++{
++ int old;
++
++ if (channel->channel_type == METADATA_CHANNEL)
++ return -EPERM;
++ old = xchg(&channel->enabled, 1);
++ if (old)
++ return -EEXIST;
++ return 0;
++}
++
++int lttng_channel_disable(struct lttng_channel *channel)
++{
++ int old;
++
++ if (channel->channel_type == METADATA_CHANNEL)
++ return -EPERM;
++ old = xchg(&channel->enabled, 0);
++ if (!old)
++ return -EEXIST;
++ return 0;
++}
++
++int lttng_event_enable(struct lttng_event *event)
++{
++ int old;
++
++ if (event->chan->channel_type == METADATA_CHANNEL)
++ return -EPERM;
++ old = xchg(&event->enabled, 1);
++ if (old)
++ return -EEXIST;
++ return 0;
++}
++
++int lttng_event_disable(struct lttng_event *event)
++{
++ int old;
++
++ if (event->chan->channel_type == METADATA_CHANNEL)
++ return -EPERM;
++ old = xchg(&event->enabled, 0);
++ if (!old)
++ return -EEXIST;
++ return 0;
++}
++
++static struct lttng_transport *lttng_transport_find(const char *name)
++{
++ struct lttng_transport *transport;
++
++ list_for_each_entry(transport, &lttng_transport_list, node) {
++ if (!strcmp(transport->name, name))
++ return transport;
++ }
++ return NULL;
++}
++
++struct lttng_channel *lttng_channel_create(struct lttng_session *session,
++ const char *transport_name,
++ void *buf_addr,
++ size_t subbuf_size, size_t num_subbuf,
++ unsigned int switch_timer_interval,
++ unsigned int read_timer_interval,
++ enum channel_type channel_type)
++{
++ struct lttng_channel *chan;
++ struct lttng_transport *transport = NULL;
++
++ mutex_lock(&sessions_mutex);
++ if (session->been_active && channel_type != METADATA_CHANNEL)
++ goto active; /* Refuse to add channel to active session */
++ transport = lttng_transport_find(transport_name);
++ if (!transport) {
++ printk(KERN_WARNING "LTTng transport %s not found\n",
++ transport_name);
++ goto notransport;
++ }
++ if (!try_module_get(transport->owner)) {
++ printk(KERN_WARNING "LTT : Can't lock transport module.\n");
++ goto notransport;
++ }
++ chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
++ if (!chan)
++ goto nomem;
++ chan->session = session;
++ chan->id = session->free_chan_id++;
++ /*
++ * Note: the channel creation op already writes into the packet
++ * headers. Therefore the "chan" information used as input
++ * should be already accessible.
++ */
++ chan->chan = transport->ops.channel_create(transport_name,
++ chan, buf_addr, subbuf_size, num_subbuf,
++ switch_timer_interval, read_timer_interval);
++ if (!chan->chan)
++ goto create_error;
++ chan->enabled = 1;
++ chan->ops = &transport->ops;
++ chan->transport = transport;
++ chan->channel_type = channel_type;
++ list_add(&chan->list, &session->chan);
++ mutex_unlock(&sessions_mutex);
++ return chan;
++
++create_error:
++ kfree(chan);
++nomem:
++ if (transport)
++ module_put(transport->owner);
++notransport:
++active:
++ mutex_unlock(&sessions_mutex);
++ return NULL;
++}
++
++/*
++ * Only used internally at session destruction for per-cpu channels, and
++ * when metadata channel is released.
++ * Needs to be called with sessions mutex held.
++ */
++static
++void _lttng_channel_destroy(struct lttng_channel *chan)
++{
++ chan->ops->channel_destroy(chan->chan);
++ module_put(chan->transport->owner);
++ list_del(&chan->list);
++ lttng_destroy_context(chan->ctx);
++ kfree(chan);
++}
++
++void lttng_metadata_channel_destroy(struct lttng_channel *chan)
++{
++ BUG_ON(chan->channel_type != METADATA_CHANNEL);
++
++ /* Protect the metadata cache with the sessions_mutex. */
++ mutex_lock(&sessions_mutex);
++ _lttng_channel_destroy(chan);
++ mutex_unlock(&sessions_mutex);
++}
++EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
++
++static
++void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
++{
++ stream->finalized = 1;
++ wake_up_interruptible(&stream->read_wait);
++}
++
++/*
++ * Supports event creation while tracing session is active.
++ */
++struct lttng_event *lttng_event_create(struct lttng_channel *chan,
++ struct lttng_kernel_event *event_param,
++ void *filter,
++ const struct lttng_event_desc *internal_desc)
++{
++ struct lttng_event *event;
++ int ret;
++
++ mutex_lock(&sessions_mutex);
++ if (chan->free_event_id == -1U)
++ goto full;
++ /*
++ * This is O(n^2) (for each event, the loop is called at event
++ * creation). Might require a hash if we have lots of events.
++ */
++ list_for_each_entry(event, &chan->session->events, list)
++ if (!strcmp(event->desc->name, event_param->name))
++ goto exist;
++ event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
++ if (!event)
++ goto cache_error;
++ event->chan = chan;
++ event->filter = filter;
++ event->id = chan->free_event_id++;
++ event->enabled = 1;
++ event->instrumentation = event_param->instrumentation;
++ /* Populate lttng_event structure before tracepoint registration. */
++ smp_wmb();
++ switch (event_param->instrumentation) {
++ case LTTNG_KERNEL_TRACEPOINT:
++ event->desc = lttng_event_get(event_param->name);
++ if (!event->desc)
++ goto register_error;
++ ret = kabi_2635_tracepoint_probe_register(event_param->name,
++ event->desc->probe_callback,
++ event);
++ if (ret)
++ goto register_error;
++ break;
++ case LTTNG_KERNEL_KPROBE:
++ ret = lttng_kprobes_register(event_param->name,
++ event_param->u.kprobe.symbol_name,
++ event_param->u.kprobe.offset,
++ event_param->u.kprobe.addr,
++ event);
++ if (ret)
++ goto register_error;
++ ret = try_module_get(event->desc->owner);
++ WARN_ON_ONCE(!ret);
++ break;
++ case LTTNG_KERNEL_KRETPROBE:
++ {
++ struct lttng_event *event_return;
++
++ /* kretprobe defines 2 events */
++ event_return =
++ kmem_cache_zalloc(event_cache, GFP_KERNEL);
++ if (!event_return)
++ goto register_error;
++ event_return->chan = chan;
++ event_return->filter = filter;
++ event_return->id = chan->free_event_id++;
++ event_return->enabled = 1;
++ event_return->instrumentation = event_param->instrumentation;
++ /*
++ * Populate lttng_event structure before kretprobe registration.
++ */
++ smp_wmb();
++ ret = lttng_kretprobes_register(event_param->name,
++ event_param->u.kretprobe.symbol_name,
++ event_param->u.kretprobe.offset,
++ event_param->u.kretprobe.addr,
++ event, event_return);
++ if (ret) {
++ kmem_cache_free(event_cache, event_return);
++ goto register_error;
++ }
++ /* Take 2 refs on the module: one per event. */
++ ret = try_module_get(event->desc->owner);
++ WARN_ON_ONCE(!ret);
++ ret = try_module_get(event->desc->owner);
++ WARN_ON_ONCE(!ret);
++ ret = _lttng_event_metadata_statedump(chan->session, chan,
++ event_return);
++ if (ret) {
++ kmem_cache_free(event_cache, event_return);
++ module_put(event->desc->owner);
++ module_put(event->desc->owner);
++ goto statedump_error;
++ }
++ list_add(&event_return->list, &chan->session->events);
++ break;
++ }
++ case LTTNG_KERNEL_FUNCTION:
++ ret = lttng_ftrace_register(event_param->name,
++ event_param->u.ftrace.symbol_name,
++ event);
++ if (ret)
++ goto register_error;
++ ret = try_module_get(event->desc->owner);
++ WARN_ON_ONCE(!ret);
++ break;
++ case LTTNG_KERNEL_NOOP:
++ event->desc = internal_desc;
++ if (!event->desc)
++ goto register_error;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ goto register_error;
++ }
++ ret = _lttng_event_metadata_statedump(chan->session, chan, event);
++ if (ret)
++ goto statedump_error;
++ list_add(&event->list, &chan->session->events);
++ mutex_unlock(&sessions_mutex);
++ return event;
++
++statedump_error:
++ /* If a statedump error occurs, events will not be readable. */
++register_error:
++ kmem_cache_free(event_cache, event);
++cache_error:
++exist:
++full:
++ mutex_unlock(&sessions_mutex);
++ return NULL;
++}
++
++/*
++ * Only used internally at session destruction.
++ */
++int _lttng_event_unregister(struct lttng_event *event)
++{
++ int ret = -EINVAL;
++
++ switch (event->instrumentation) {
++ case LTTNG_KERNEL_TRACEPOINT:
++ ret = kabi_2635_tracepoint_probe_unregister(event->desc->name,
++ event->desc->probe_callback,
++ event);
++ if (ret)
++ return ret;
++ break;
++ case LTTNG_KERNEL_KPROBE:
++ lttng_kprobes_unregister(event);
++ ret = 0;
++ break;
++ case LTTNG_KERNEL_KRETPROBE:
++ lttng_kretprobes_unregister(event);
++ ret = 0;
++ break;
++ case LTTNG_KERNEL_FUNCTION:
++ lttng_ftrace_unregister(event);
++ ret = 0;
++ break;
++ case LTTNG_KERNEL_NOOP:
++ ret = 0;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ }
++ return ret;
++}
++
++/*
++ * Only used internally at session destruction.
++ */
++static
++void _lttng_event_destroy(struct lttng_event *event)
++{
++ switch (event->instrumentation) {
++ case LTTNG_KERNEL_TRACEPOINT:
++ lttng_event_put(event->desc);
++ break;
++ case LTTNG_KERNEL_KPROBE:
++ module_put(event->desc->owner);
++ lttng_kprobes_destroy_private(event);
++ break;
++ case LTTNG_KERNEL_KRETPROBE:
++ module_put(event->desc->owner);
++ lttng_kretprobes_destroy_private(event);
++ break;
++ case LTTNG_KERNEL_FUNCTION:
++ module_put(event->desc->owner);
++ lttng_ftrace_destroy_private(event);
++ break;
++ case LTTNG_KERNEL_NOOP:
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ }
++ list_del(&event->list);
++ lttng_destroy_context(event->ctx);
++ kmem_cache_free(event_cache, event);
++}
++
++/*
++ * Serialize at most one packet worth of metadata into a metadata
++ * channel.
++ * We have exclusive access to our metadata buffer (protected by the
++ * sessions_mutex), so we can do racy operations such as looking for
++ * remaining space left in packet and write, since mutual exclusion
++ * protects us from concurrent writes.
++ */
++int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
++ struct channel *chan)
++{
++ struct lib_ring_buffer_ctx ctx;
++ int ret = 0;
++ size_t len, reserve_len;
++
++ /*
++ * Ensure we support multiple get_next / put sequences followed
++ * by put_next.
++ */
++ WARN_ON(stream->metadata_in < stream->metadata_out);
++ if (stream->metadata_in != stream->metadata_out)
++ return 0;
++
++ len = stream->metadata_cache->metadata_written -
++ stream->metadata_in;
++ if (!len)
++ return 0;
++ reserve_len = min_t(size_t,
++ stream->transport->ops.packet_avail_size(chan),
++ len);
++ lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
++ sizeof(char), -1);
++ /*
++ * If reservation failed, return an error to the caller.
++ */
++ ret = stream->transport->ops.event_reserve(&ctx, 0);
++ if (ret != 0) {
++ printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
++ goto end;
++ }
++ stream->transport->ops.event_write(&ctx,
++ stream->metadata_cache->data + stream->metadata_in,
++ reserve_len);
++ stream->transport->ops.event_commit(&ctx);
++ stream->metadata_in += reserve_len;
++ ret = reserve_len;
++
++end:
++ return ret;
++}
++
++/*
++ * Write the metadata to the metadata cache.
++ * Must be called with sessions_mutex held.
++ */
++int lttng_metadata_printf(struct lttng_session *session,
++ const char *fmt, ...)
++{
++ char *str;
++ size_t len;
++ va_list ap;
++ struct lttng_metadata_stream *stream;
++
++ WARN_ON_ONCE(!ACCESS_ONCE(session->active));
++
++ va_start(ap, fmt);
++ str = kvasprintf(GFP_KERNEL, fmt, ap);
++ va_end(ap);
++ if (!str)
++ return -ENOMEM;
++
++ len = strlen(str);
++ if (session->metadata_cache->metadata_written + len >
++ session->metadata_cache->cache_alloc) {
++ char *tmp_cache_realloc;
++ unsigned int tmp_cache_alloc_size;
++
++ tmp_cache_alloc_size = max_t(unsigned int,
++ session->metadata_cache->cache_alloc + len,
++ session->metadata_cache->cache_alloc << 1);
++ tmp_cache_realloc = krealloc(session->metadata_cache->data,
++ tmp_cache_alloc_size, GFP_KERNEL);
++ if (!tmp_cache_realloc)
++ goto err;
++ session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
++ session->metadata_cache->data = tmp_cache_realloc;
++ }
++ memcpy(session->metadata_cache->data +
++ session->metadata_cache->metadata_written,
++ str, len);
++ session->metadata_cache->metadata_written += len;
++ kfree(str);
++
++ list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
++ wake_up_interruptible(&stream->read_wait);
++
++ return 0;
++
++err:
++ kfree(str);
++ return -ENOMEM;
++}
++
++/*
++ * Must be called with sessions_mutex held.
++ */
++static
++int _lttng_field_statedump(struct lttng_session *session,
++ const struct lttng_event_field *field)
++{
++ int ret = 0;
++
++ switch (field->type.atype) {
++ case atype_integer:
++ ret = lttng_metadata_printf(session,
++ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
++ field->type.u.basic.integer.size,
++ field->type.u.basic.integer.alignment,
++ field->type.u.basic.integer.signedness,
++ (field->type.u.basic.integer.encoding == lttng_encode_none)
++ ? "none"
++ : (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
++ ? "UTF8"
++ : "ASCII",
++ field->type.u.basic.integer.base,
++#ifdef __BIG_ENDIAN
++ field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
++#else
++ field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
++#endif
++ field->name);
++ break;
++ case atype_enum:
++ ret = lttng_metadata_printf(session,
++ " %s _%s;\n",
++ field->type.u.basic.enumeration.name,
++ field->name);
++ break;
++ case atype_array:
++ {
++ const struct lttng_basic_type *elem_type;
++
++ elem_type = &field->type.u.array.elem_type;
++ ret = lttng_metadata_printf(session,
++ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
++ elem_type->u.basic.integer.size,
++ elem_type->u.basic.integer.alignment,
++ elem_type->u.basic.integer.signedness,
++ (elem_type->u.basic.integer.encoding == lttng_encode_none)
++ ? "none"
++ : (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
++ ? "UTF8"
++ : "ASCII",
++ elem_type->u.basic.integer.base,
++#ifdef __BIG_ENDIAN
++ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
++#else
++ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
++#endif
++ field->name, field->type.u.array.length);
++ break;
++ }
++ case atype_sequence:
++ {
++ const struct lttng_basic_type *elem_type;
++ const struct lttng_basic_type *length_type;
++
++ elem_type = &field->type.u.sequence.elem_type;
++ length_type = &field->type.u.sequence.length_type;
++ ret = lttng_metadata_printf(session,
++ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
++ length_type->u.basic.integer.size,
++ (unsigned int) length_type->u.basic.integer.alignment,
++ length_type->u.basic.integer.signedness,
++ (length_type->u.basic.integer.encoding == lttng_encode_none)
++ ? "none"
++ : ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
++ ? "UTF8"
++ : "ASCII"),
++ length_type->u.basic.integer.base,
++#ifdef __BIG_ENDIAN
++ length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
++#else
++ length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
++#endif
++ field->name);
++ if (ret)
++ return ret;
++
++ ret = lttng_metadata_printf(session,
++ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
++ elem_type->u.basic.integer.size,
++ (unsigned int) elem_type->u.basic.integer.alignment,
++ elem_type->u.basic.integer.signedness,
++ (elem_type->u.basic.integer.encoding == lttng_encode_none)
++ ? "none"
++ : ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
++ ? "UTF8"
++ : "ASCII"),
++ elem_type->u.basic.integer.base,
++#ifdef __BIG_ENDIAN
++ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
++#else
++ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
++#endif
++ field->name,
++ field->name);
++ break;
++ }
++
++ case atype_string:
++ /* Default encoding is UTF8 */
++ ret = lttng_metadata_printf(session,
++ " string%s _%s;\n",
++ field->type.u.basic.string.encoding == lttng_encode_ASCII ?
++ " { encoding = ASCII; }" : "",
++ field->name);
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ return -EINVAL;
++ }
++ return ret;
++}
++
++static
++int _lttng_context_metadata_statedump(struct lttng_session *session,
++ struct lttng_ctx *ctx)
++{
++ int ret = 0;
++ int i;
++
++ if (!ctx)
++ return 0;
++ for (i = 0; i < ctx->nr_fields; i++) {
++ const struct lttng_ctx_field *field = &ctx->fields[i];
++
++ ret = _lttng_field_statedump(session, &field->event_field);
++ if (ret)
++ return ret;
++ }
++ return ret;
++}
++
++static
++int _lttng_fields_metadata_statedump(struct lttng_session *session,
++ struct lttng_event *event)
++{
++ const struct lttng_event_desc *desc = event->desc;
++ int ret = 0;
++ int i;
++
++ for (i = 0; i < desc->nr_fields; i++) {
++ const struct lttng_event_field *field = &desc->fields[i];
++
++ ret = _lttng_field_statedump(session, field);
++ if (ret)
++ return ret;
++ }
++ return ret;
++}
++
++/*
++ * Must be called with sessions_mutex held.
++ */
++static
++int _lttng_event_metadata_statedump(struct lttng_session *session,
++ struct lttng_channel *chan,
++ struct lttng_event *event)
++{
++ int ret = 0;
++
++ if (event->metadata_dumped || !ACCESS_ONCE(session->active))
++ return 0;
++ if (chan->channel_type == METADATA_CHANNEL)
++ return 0;
++
++ ret = lttng_metadata_printf(session,
++ "event {\n"
++ " name = %s;\n"
++ " id = %u;\n"
++ " stream_id = %u;\n",
++ event->desc->name,
++ event->id,
++ event->chan->id);
++ if (ret)
++ goto end;
++
++ if (event->ctx) {
++ ret = lttng_metadata_printf(session,
++ " context := struct {\n");
++ if (ret)
++ goto end;
++ }
++ ret = _lttng_context_metadata_statedump(session, event->ctx);
++ if (ret)
++ goto end;
++ if (event->ctx) {
++ ret = lttng_metadata_printf(session,
++ " };\n");
++ if (ret)
++ goto end;
++ }
++
++ ret = lttng_metadata_printf(session,
++ " fields := struct {\n"
++ );
++ if (ret)
++ goto end;
++
++ ret = _lttng_fields_metadata_statedump(session, event);
++ if (ret)
++ goto end;
++
++ /*
++ * LTTng space reservation can only reserve multiples of the
++ * byte size.
++ */
++ ret = lttng_metadata_printf(session,
++ " };\n"
++ "};\n\n");
++ if (ret)
++ goto end;
++
++ event->metadata_dumped = 1;
++end:
++ return ret;
++
++}
++
++/*
++ * Must be called with sessions_mutex held.
++ */
++static
++int _lttng_channel_metadata_statedump(struct lttng_session *session,
++ struct lttng_channel *chan)
++{
++ int ret = 0;
++
++ if (chan->metadata_dumped || !ACCESS_ONCE(session->active))
++ return 0;
++
++ if (chan->channel_type == METADATA_CHANNEL)
++ return 0;
++
++ WARN_ON_ONCE(!chan->header_type);
++ ret = lttng_metadata_printf(session,
++ "stream {\n"
++ " id = %u;\n"
++ " event.header := %s;\n"
++ " packet.context := struct packet_context;\n",
++ chan->id,
++ chan->header_type == 1 ? "struct event_header_compact" :
++ "struct event_header_large");
++ if (ret)
++ goto end;
++
++ if (chan->ctx) {
++ ret = lttng_metadata_printf(session,
++ " event.context := struct {\n");
++ if (ret)
++ goto end;
++ }
++ ret = _lttng_context_metadata_statedump(session, chan->ctx);
++ if (ret)
++ goto end;
++ if (chan->ctx) {
++ ret = lttng_metadata_printf(session,
++ " };\n");
++ if (ret)
++ goto end;
++ }
++
++ ret = lttng_metadata_printf(session,
++ "};\n\n");
++
++ chan->metadata_dumped = 1;
++end:
++ return ret;
++}
++
++/*
++ * Must be called with sessions_mutex held.
++ */
++static
++int _lttng_stream_packet_context_declare(struct lttng_session *session)
++{
++ return lttng_metadata_printf(session,
++ "struct packet_context {\n"
++ " uint64_clock_monotonic_t timestamp_begin;\n"
++ " uint64_clock_monotonic_t timestamp_end;\n"
++ " uint64_t content_size;\n"
++ " uint64_t packet_size;\n"
++ " unsigned long events_discarded;\n"
++ " uint32_t cpu_id;\n"
++ "};\n\n"
++ );
++}
++
++/*
++ * Compact header:
++ * id: range: 0 - 30.
++ * id 31 is reserved to indicate an extended header.
++ *
++ * Large header:
++ * id: range: 0 - 65534.
++ * id 65535 is reserved to indicate an extended header.
++ *
++ * Must be called with sessions_mutex held.
++ */
++static
++int _lttng_event_header_declare(struct lttng_session *session)
++{
++ return lttng_metadata_printf(session,
++ "struct event_header_compact {\n"
++ " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
++ " variant <id> {\n"
++ " struct {\n"
++ " uint27_clock_monotonic_t timestamp;\n"
++ " } compact;\n"
++ " struct {\n"
++ " uint32_t id;\n"
++ " uint64_clock_monotonic_t timestamp;\n"
++ " } extended;\n"
++ " } v;\n"
++ "} align(%u);\n"
++ "\n"
++ "struct event_header_large {\n"
++ " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
++ " variant <id> {\n"
++ " struct {\n"
++ " uint32_clock_monotonic_t timestamp;\n"
++ " } compact;\n"
++ " struct {\n"
++ " uint32_t id;\n"
++ " uint64_clock_monotonic_t timestamp;\n"
++ " } extended;\n"
++ " } v;\n"
++ "} align(%u);\n\n",
++ lttng_alignof(uint32_t) * CHAR_BIT,
++ lttng_alignof(uint16_t) * CHAR_BIT
++ );
++}
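++
++/*
++ * Note (cross-reference): the "extended" ids reserved above (31 for the
++ * compact header, 65535 for the large header) are the reason
++ * lttng_session_enable() switches a channel to the large event header
++ * once free_event_id reaches 31.
++ */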
++
++ /*
++ * Approximation of NTP time of day to clock monotonic correlation,
++ * taken at start of trace.
++ * Yes, this is only an approximation. Yes, we can (and will) do better
++ * in future versions.
++ */
++static
++uint64_t measure_clock_offset(void)
++{
++ uint64_t offset, monotonic[2], realtime;
++ struct timespec rts = { 0, 0 };
++ unsigned long flags;
++
++ /* Disable interrupts to increase correlation precision. */
++ local_irq_save(flags);
++ monotonic[0] = trace_clock_read64();
++ getnstimeofday(&rts);
++ monotonic[1] = trace_clock_read64();
++ local_irq_restore(flags);
++
++ offset = (monotonic[0] + monotonic[1]) >> 1;
++ realtime = (uint64_t) rts.tv_sec * NSEC_PER_SEC;
++ realtime += rts.tv_nsec;
++ offset = realtime - offset;
++ return offset;
++}
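++
++/*
++ * Worked example (illustrative numbers only): if the two monotonic reads
++ * return 1000 and 1010 ns and getnstimeofday() reports 2000 ns since the
++ * Epoch, the midpoint of the monotonic reads is 1005 ns and the returned
++ * offset is 2000 - 1005 = 995 ns, so realtime ~= monotonic + offset at
++ * the time of measurement.
++ */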
++
++/*
++ * Output metadata into this session's metadata buffers.
++ * Must be called with sessions_mutex held.
++ */
++static
++int _lttng_session_metadata_statedump(struct lttng_session *session)
++{
++ unsigned char *uuid_c = session->uuid.b;
++ unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
++ struct lttng_channel *chan;
++ struct lttng_event *event;
++ int ret = 0;
++
++ if (!ACCESS_ONCE(session->active))
++ return 0;
++ if (session->metadata_dumped)
++ goto skip_session;
++
++ snprintf(uuid_s, sizeof(uuid_s),
++ "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
++ uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
++ uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
++ uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
++ uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
++
++ ret = lttng_metadata_printf(session,
++ "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
++ "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
++ "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
++ "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
++ "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
++ "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
++ "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
++ "\n"
++ "trace {\n"
++ " major = %u;\n"
++ " minor = %u;\n"
++ " uuid = \"%s\";\n"
++ " byte_order = %s;\n"
++ " packet.header := struct {\n"
++ " uint32_t magic;\n"
++ " uint8_t uuid[16];\n"
++ " uint32_t stream_id;\n"
++ " };\n"
++ "};\n\n",
++ lttng_alignof(uint8_t) * CHAR_BIT,
++ lttng_alignof(uint16_t) * CHAR_BIT,
++ lttng_alignof(uint32_t) * CHAR_BIT,
++ lttng_alignof(uint64_t) * CHAR_BIT,
++ sizeof(unsigned long) * CHAR_BIT,
++ lttng_alignof(unsigned long) * CHAR_BIT,
++ CTF_SPEC_MAJOR,
++ CTF_SPEC_MINOR,
++ uuid_s,
++#ifdef __BIG_ENDIAN
++ "be"
++#else
++ "le"
++#endif
++ );
++ if (ret)
++ goto end;
++
++ ret = lttng_metadata_printf(session,
++ "env {\n"
++ " hostname = \"%s\";\n"
++ " domain = \"kernel\";\n"
++ " sysname = \"%s\";\n"
++ " kernel_release = \"%s\";\n"
++ " kernel_version = \"%s\";\n"
++ " tracer_name = \"lttng-modules\";\n"
++ " tracer_major = %d;\n"
++ " tracer_minor = %d;\n"
++ " tracer_patchlevel = %d;\n"
++ "};\n\n",
++ current->nsproxy->uts_ns->name.nodename,
++ utsname()->sysname,
++ utsname()->release,
++ utsname()->version,
++ LTTNG_MODULES_MAJOR_VERSION,
++ LTTNG_MODULES_MINOR_VERSION,
++ LTTNG_MODULES_PATCHLEVEL_VERSION
++ );
++ if (ret)
++ goto end;
++
++ ret = lttng_metadata_printf(session,
++ "clock {\n"
++ " name = %s;\n",
++ "monotonic"
++ );
++ if (ret)
++ goto end;
++
++ if (!trace_clock_uuid(clock_uuid_s)) {
++ ret = lttng_metadata_printf(session,
++ " uuid = \"%s\";\n",
++ clock_uuid_s
++ );
++ if (ret)
++ goto end;
++ }
++
++ ret = lttng_metadata_printf(session,
++ " description = \"Monotonic Clock\";\n"
++ " freq = %llu; /* Frequency, in Hz */\n"
++ " /* clock value offset from Epoch is: offset * (1/freq) */\n"
++ " offset = %llu;\n"
++ "};\n\n",
++ (unsigned long long) trace_clock_freq(),
++ (unsigned long long) measure_clock_offset()
++ );
++ if (ret)
++ goto end;
++
++ ret = lttng_metadata_printf(session,
++ "typealias integer {\n"
++ " size = 27; align = 1; signed = false;\n"
++ " map = clock.monotonic.value;\n"
++ "} := uint27_clock_monotonic_t;\n"
++ "\n"
++ "typealias integer {\n"
++ " size = 32; align = %u; signed = false;\n"
++ " map = clock.monotonic.value;\n"
++ "} := uint32_clock_monotonic_t;\n"
++ "\n"
++ "typealias integer {\n"
++ " size = 64; align = %u; signed = false;\n"
++ " map = clock.monotonic.value;\n"
++ "} := uint64_clock_monotonic_t;\n\n",
++ lttng_alignof(uint32_t) * CHAR_BIT,
++ lttng_alignof(uint64_t) * CHAR_BIT
++ );
++ if (ret)
++ goto end;
++
++ ret = _lttng_stream_packet_context_declare(session);
++ if (ret)
++ goto end;
++
++ ret = _lttng_event_header_declare(session);
++ if (ret)
++ goto end;
++
++skip_session:
++ list_for_each_entry(chan, &session->chan, list) {
++ ret = _lttng_channel_metadata_statedump(session, chan);
++ if (ret)
++ goto end;
++ }
++
++ list_for_each_entry(event, &session->events, list) {
++ ret = _lttng_event_metadata_statedump(session, event->chan, event);
++ if (ret)
++ goto end;
++ }
++ session->metadata_dumped = 1;
++end:
++ return ret;
++}
++
++/**
++ * lttng_transport_register - LTT transport registration
++ * @transport: transport structure
++ *
++ * Registers a transport which can be used as output to extract the data out of
++ * LTTng. The module calling this registration function must ensure that no
++ * trap-inducing code will be executed by the transport functions. E.g.
++ * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
++ * is made visible to the transport function. This registration acts as a
++ * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
++ * after its registration must it synchronize the TLBs.
++ */
++void lttng_transport_register(struct lttng_transport *transport)
++{
++ /*
++ * Make sure no page fault can be triggered by the module about to be
++ * registered. We deal with this here so we don't have to call
++ * vmalloc_sync_all() in each module's init.
++ */
++ wrapper_vmalloc_sync_all();
++
++ mutex_lock(&sessions_mutex);
++ list_add_tail(&transport->node, &lttng_transport_list);
++ mutex_unlock(&sessions_mutex);
++}
++EXPORT_SYMBOL_GPL(lttng_transport_register);
++
++/**
++ * lttng_transport_unregister - LTT transport unregistration
++ * @transport: transport structure
++ */
++void lttng_transport_unregister(struct lttng_transport *transport)
++{
++ mutex_lock(&sessions_mutex);
++ list_del(&transport->node);
++ mutex_unlock(&sessions_mutex);
++}
++EXPORT_SYMBOL_GPL(lttng_transport_unregister);
++
++static int __init lttng_events_init(void)
++{
++ int ret;
++
++ event_cache = KMEM_CACHE(lttng_event, 0);
++ if (!event_cache)
++ return -ENOMEM;
++ ret = lttng_abi_init();
++ if (ret)
++ goto error_abi;
++ return 0;
++error_abi:
++ kmem_cache_destroy(event_cache);
++ return ret;
++}
++
++module_init(lttng_events_init);
++
++static void __exit lttng_events_exit(void)
++{
++ struct lttng_session *session, *tmpsession;
++
++ lttng_abi_exit();
++ list_for_each_entry_safe(session, tmpsession, &sessions, list)
++ lttng_session_destroy(session);
++ kmem_cache_destroy(event_cache);
++}
++
++module_exit(lttng_events_exit);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng Events");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-events.h
+@@ -0,0 +1,507 @@
++#ifndef _LTTNG_EVENTS_H
++#define _LTTNG_EVENTS_H
++
++/*
++ * lttng-events.h
++ *
++ * Holds LTTng per-session event registry.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/version.h>
++#include <linux/list.h>
++#include <linux/kprobes.h>
++#include <linux/kref.h>
++#include "wrapper/uuid.h"
++#include "lttng-abi.h"
++#include "lttng-abi-old.h"
++
++#define lttng_is_signed_type(type) (((type)(-1)) < 0)
++
++struct lttng_channel;
++struct lttng_session;
++struct lttng_metadata_cache;
++struct lib_ring_buffer_ctx;
++struct perf_event;
++struct perf_event_attr;
++
++/* Type description */
++
++/* Update the abstract_types name table in lttng-types.c along with this enum */
++enum abstract_types {
++ atype_integer,
++ atype_enum,
++ atype_array,
++ atype_sequence,
++ atype_string,
++ NR_ABSTRACT_TYPES,
++};
++
++/* Update the string_encodings name table in lttng-types.c along with this enum */
++enum lttng_string_encodings {
++ lttng_encode_none = 0,
++ lttng_encode_UTF8 = 1,
++ lttng_encode_ASCII = 2,
++ NR_STRING_ENCODINGS,
++};
++
++enum channel_type {
++ PER_CPU_CHANNEL,
++ METADATA_CHANNEL,
++};
++
++struct lttng_enum_entry {
++ unsigned long long start, end; /* start and end are inclusive */
++ const char *string;
++};
++
++#define __type_integer(_type, _byte_order, _base, _encoding) \
++ { \
++ .atype = atype_integer, \
++ .u.basic.integer = \
++ { \
++ .size = sizeof(_type) * CHAR_BIT, \
++ .alignment = lttng_alignof(_type) * CHAR_BIT, \
++ .signedness = lttng_is_signed_type(_type), \
++ .reverse_byte_order = _byte_order != __BYTE_ORDER, \
++ .base = _base, \
++ .encoding = lttng_encode_##_encoding, \
++ }, \
++ } \
++
++struct lttng_integer_type {
++ unsigned int size; /* in bits */
++ unsigned short alignment; /* in bits */
++ unsigned int signedness:1,
++ reverse_byte_order:1;
++ unsigned int base; /* 2, 8, 10, 16, for pretty print */
++ enum lttng_string_encodings encoding;
++};
++
++union _lttng_basic_type {
++ struct lttng_integer_type integer;
++ struct {
++ const char *name;
++ } enumeration;
++ struct {
++ enum lttng_string_encodings encoding;
++ } string;
++};
++
++struct lttng_basic_type {
++ enum abstract_types atype;
++ union {
++ union _lttng_basic_type basic;
++ } u;
++};
++
++struct lttng_type {
++ enum abstract_types atype;
++ union {
++ union _lttng_basic_type basic;
++ struct {
++ struct lttng_basic_type elem_type;
++ unsigned int length; /* num. elems. */
++ } array;
++ struct {
++ struct lttng_basic_type length_type;
++ struct lttng_basic_type elem_type;
++ } sequence;
++ } u;
++};
++
++struct lttng_enum {
++ const char *name;
++ struct lttng_type container_type;
++ const struct lttng_enum_entry *entries;
++ unsigned int len;
++};
++
++/* Event field description */
++
++struct lttng_event_field {
++ const char *name;
++ struct lttng_type type;
++};
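++
++/*
++ * Illustrative sketch (hypothetical field and array names): a probe
++ * provider typically describes its event payload with an array of
++ * lttng_event_field entries, using the __type_integer() helper above for
++ * integer fields, e.g.:
++ *
++ *   static const struct lttng_event_field my_event_fields[] = {
++ *           {
++ *                   .name = "my_counter",
++ *                   .type = __type_integer(int, __BYTE_ORDER, 10, none),
++ *           },
++ *   };
++ */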
++
++/*
++ * We need to keep this perf counter field separate from struct
++ * lttng_ctx_field because cpu hotplug needs fixed-location addresses.
++ */
++struct lttng_perf_counter_field {
++ struct notifier_block nb;
++ int hp_enable;
++ struct perf_event_attr *attr;
++ struct perf_event **e; /* per-cpu array */
++};
++
++struct lttng_ctx_field {
++ struct lttng_event_field event_field;
++ size_t (*get_size)(size_t offset);
++ void (*record)(struct lttng_ctx_field *field,
++ struct lib_ring_buffer_ctx *ctx,
++ struct lttng_channel *chan);
++ union {
++ struct lttng_perf_counter_field *perf_counter;
++ } u;
++ void (*destroy)(struct lttng_ctx_field *field);
++};
++
++struct lttng_ctx {
++ struct lttng_ctx_field *fields;
++ unsigned int nr_fields;
++ unsigned int allocated_fields;
++};
++
++struct lttng_event_desc {
++ const char *name;
++ void *probe_callback;
++ const struct lttng_event_ctx *ctx; /* context */
++ const struct lttng_event_field *fields; /* event payload */
++ unsigned int nr_fields;
++ struct module *owner;
++};
++
++struct lttng_probe_desc {
++ const struct lttng_event_desc **event_desc;
++ unsigned int nr_events;
++ struct list_head head; /* chain registered probes */
++};
++
++struct lttng_krp; /* Kretprobe handling */
++
++/*
++ * lttng_event structure is referred to by the tracing fast path. It must be
++ * kept small.
++ */
++struct lttng_event {
++ unsigned int id;
++ struct lttng_channel *chan;
++ int enabled;
++ const struct lttng_event_desc *desc;
++ void *filter;
++ struct lttng_ctx *ctx;
++ enum lttng_kernel_instrumentation instrumentation;
++ union {
++ struct {
++ struct kprobe kp;
++ char *symbol_name;
++ } kprobe;
++ struct {
++ struct lttng_krp *lttng_krp;
++ char *symbol_name;
++ } kretprobe;
++ struct {
++ char *symbol_name;
++ } ftrace;
++ } u;
++ struct list_head list; /* Event list */
++ unsigned int metadata_dumped:1;
++};
++
++struct lttng_channel_ops {
++ struct channel *(*channel_create)(const char *name,
++ struct lttng_channel *lttng_chan,
++ void *buf_addr,
++ size_t subbuf_size, size_t num_subbuf,
++ unsigned int switch_timer_interval,
++ unsigned int read_timer_interval);
++ void (*channel_destroy)(struct channel *chan);
++ struct lib_ring_buffer *(*buffer_read_open)(struct channel *chan);
++ int (*buffer_has_read_closed_stream)(struct channel *chan);
++ void (*buffer_read_close)(struct lib_ring_buffer *buf);
++ int (*event_reserve)(struct lib_ring_buffer_ctx *ctx,
++ uint32_t event_id);
++ void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
++ void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
++ size_t len);
++ void (*event_write_from_user)(struct lib_ring_buffer_ctx *ctx,
++ const void *src, size_t len);
++ void (*event_memset)(struct lib_ring_buffer_ctx *ctx,
++ int c, size_t len);
++ /*
++ * packet_avail_size returns the available size in the current
++ * packet. Note that the size returned is only a hint, since it
++ * may change due to concurrent writes.
++ */
++ size_t (*packet_avail_size)(struct channel *chan);
++ wait_queue_head_t *(*get_writer_buf_wait_queue)(struct channel *chan, int cpu);
++ wait_queue_head_t *(*get_hp_wait_queue)(struct channel *chan);
++ int (*is_finalized)(struct channel *chan);
++ int (*is_disabled)(struct channel *chan);
++};
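++
++/*
++ * Sketch of the typical fast-path call sequence through these operations
++ * (chan is a struct lttng_channel; payload and payload_len are
++ * placeholders; error handling omitted):
++ *
++ *   ret = chan->ops->event_reserve(&ctx, event_id);
++ *   if (!ret) {
++ *           chan->ops->event_write(&ctx, payload, payload_len);
++ *           chan->ops->event_commit(&ctx);
++ *   }
++ */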
++
++struct lttng_transport {
++ char *name;
++ struct module *owner;
++ struct list_head node;
++ struct lttng_channel_ops ops;
++};
++
++struct lttng_channel {
++ unsigned int id;
++ struct channel *chan; /* Channel buffers */
++ int enabled;
++ struct lttng_ctx *ctx;
++ /* Event ID management */
++ struct lttng_session *session;
++ struct file *file; /* File associated to channel */
++ unsigned int free_event_id; /* Next event ID to allocate */
++ struct list_head list; /* Channel list */
++ struct lttng_channel_ops *ops;
++ struct lttng_transport *transport;
++ struct lttng_event **sc_table; /* for syscall tracing */
++ struct lttng_event **compat_sc_table;
++ struct lttng_event *sc_unknown; /* for unknown syscalls */
++ struct lttng_event *sc_compat_unknown;
++ struct lttng_event *sc_exit; /* for syscall exit */
++ int header_type; /* 0: unset, 1: compact, 2: large */
++ enum channel_type channel_type;
++ unsigned int metadata_dumped:1;
++};
++
++struct lttng_metadata_stream {
++ void *priv; /* Ring buffer private data */
++ struct lttng_metadata_cache *metadata_cache;
++ unsigned int metadata_in; /* Bytes read from the cache */
++ unsigned int metadata_out; /* Bytes consumed from stream */
++ int finalized; /* Has channel been finalized */
++ wait_queue_head_t read_wait; /* Reader buffer-level wait queue */
++ struct list_head list; /* Stream list */
++ struct lttng_transport *transport;
++};
++
++struct lttng_session {
++	int active;			/* Is trace session active? */
++	int been_active;		/* Has trace session been active? */
++ struct file *file; /* File associated to session */
++ struct list_head chan; /* Channel list head */
++ struct list_head events; /* Event list head */
++ struct list_head list; /* Session list */
++ unsigned int free_chan_id; /* Next chan ID to allocate */
++ uuid_le uuid; /* Trace session unique ID */
++ struct lttng_metadata_cache *metadata_cache;
++ unsigned int metadata_dumped:1;
++};
++
++struct lttng_metadata_cache {
++ char *data; /* Metadata cache */
++ unsigned int cache_alloc; /* Metadata allocated size (bytes) */
++ unsigned int metadata_written; /* Number of bytes written in metadata cache */
++ struct kref refcount; /* Metadata cache usage */
++ struct list_head metadata_stream; /* Metadata stream list */
++};
++
++struct lttng_session *lttng_session_create(void);
++int lttng_session_enable(struct lttng_session *session);
++int lttng_session_disable(struct lttng_session *session);
++void lttng_session_destroy(struct lttng_session *session);
++void metadata_cache_destroy(struct kref *kref);
++
++struct lttng_channel *lttng_channel_create(struct lttng_session *session,
++ const char *transport_name,
++ void *buf_addr,
++ size_t subbuf_size, size_t num_subbuf,
++ unsigned int switch_timer_interval,
++ unsigned int read_timer_interval,
++ enum channel_type channel_type);
++struct lttng_channel *lttng_global_channel_create(struct lttng_session *session,
++ int overwrite, void *buf_addr,
++ size_t subbuf_size, size_t num_subbuf,
++ unsigned int switch_timer_interval,
++ unsigned int read_timer_interval);
++
++void lttng_metadata_channel_destroy(struct lttng_channel *chan);
++struct lttng_event *lttng_event_create(struct lttng_channel *chan,
++ struct lttng_kernel_event *event_param,
++ void *filter,
++ const struct lttng_event_desc *internal_desc);
++struct lttng_event *lttng_event_compat_old_create(struct lttng_channel *chan,
++ struct lttng_kernel_old_event *old_event_param,
++ void *filter,
++ const struct lttng_event_desc *internal_desc);
++
++int lttng_channel_enable(struct lttng_channel *channel);
++int lttng_channel_disable(struct lttng_channel *channel);
++int lttng_event_enable(struct lttng_event *event);
++int lttng_event_disable(struct lttng_event *event);
++
++void lttng_transport_register(struct lttng_transport *transport);
++void lttng_transport_unregister(struct lttng_transport *transport);
++
++void synchronize_trace(void);
++int lttng_abi_init(void);
++int lttng_abi_compat_old_init(void);
++void lttng_abi_exit(void);
++void lttng_abi_compat_old_exit(void);
++
++int lttng_probe_register(struct lttng_probe_desc *desc);
++void lttng_probe_unregister(struct lttng_probe_desc *desc);
++const struct lttng_event_desc *lttng_event_get(const char *name);
++void lttng_event_put(const struct lttng_event_desc *desc);
++int lttng_probes_init(void);
++void lttng_probes_exit(void);
++
++int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
++ struct channel *chan);
++
++#if defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
++int lttng_syscalls_register(struct lttng_channel *chan, void *filter);
++int lttng_syscalls_unregister(struct lttng_channel *chan);
++#else
++static inline int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
++{
++ return -ENOSYS;
++}
++
++static inline int lttng_syscalls_unregister(struct lttng_channel *chan)
++{
++ return 0;
++}
++#endif
++
++struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx);
++int lttng_find_context(struct lttng_ctx *ctx, const char *name);
++void lttng_remove_context_field(struct lttng_ctx **ctx,
++ struct lttng_ctx_field *field);
++void lttng_destroy_context(struct lttng_ctx *ctx);
++int lttng_add_pid_to_ctx(struct lttng_ctx **ctx);
++int lttng_add_procname_to_ctx(struct lttng_ctx **ctx);
++int lttng_add_prio_to_ctx(struct lttng_ctx **ctx);
++int lttng_add_nice_to_ctx(struct lttng_ctx **ctx);
++int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx);
++int lttng_add_tid_to_ctx(struct lttng_ctx **ctx);
++int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx);
++int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx);
++int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx);
++int lttng_add_hostname_to_ctx(struct lttng_ctx **ctx);
++#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++int lttng_add_perf_counter_to_ctx(uint32_t type,
++ uint64_t config,
++ const char *name,
++ struct lttng_ctx **ctx);
++#else
++static inline
++int lttng_add_perf_counter_to_ctx(uint32_t type,
++ uint64_t config,
++ const char *name,
++ struct lttng_ctx **ctx)
++{
++ return -ENOSYS;
++}
++#endif
++
++extern int lttng_statedump_start(struct lttng_session *session);
++
++#ifdef CONFIG_KPROBES
++int lttng_kprobes_register(const char *name,
++ const char *symbol_name,
++ uint64_t offset,
++ uint64_t addr,
++ struct lttng_event *event);
++void lttng_kprobes_unregister(struct lttng_event *event);
++void lttng_kprobes_destroy_private(struct lttng_event *event);
++#else
++static inline
++int lttng_kprobes_register(const char *name,
++ const char *symbol_name,
++ uint64_t offset,
++ uint64_t addr,
++ struct lttng_event *event)
++{
++ return -ENOSYS;
++}
++
++static inline
++void lttng_kprobes_unregister(struct lttng_event *event)
++{
++}
++
++static inline
++void lttng_kprobes_destroy_private(struct lttng_event *event)
++{
++}
++#endif
++
++#ifdef CONFIG_KRETPROBES
++int lttng_kretprobes_register(const char *name,
++ const char *symbol_name,
++ uint64_t offset,
++ uint64_t addr,
++ struct lttng_event *event_entry,
++ struct lttng_event *event_exit);
++void lttng_kretprobes_unregister(struct lttng_event *event);
++void lttng_kretprobes_destroy_private(struct lttng_event *event);
++#else
++static inline
++int lttng_kretprobes_register(const char *name,
++ const char *symbol_name,
++ uint64_t offset,
++ uint64_t addr,
++ struct lttng_event *event_entry,
++ struct lttng_event *event_exit)
++{
++ return -ENOSYS;
++}
++
++static inline
++void lttng_kretprobes_unregister(struct lttng_event *event)
++{
++}
++
++static inline
++void lttng_kretprobes_destroy_private(struct lttng_event *event)
++{
++}
++#endif
++
++#ifdef CONFIG_DYNAMIC_FTRACE
++int lttng_ftrace_register(const char *name,
++ const char *symbol_name,
++ struct lttng_event *event);
++void lttng_ftrace_unregister(struct lttng_event *event);
++void lttng_ftrace_destroy_private(struct lttng_event *event);
++#else
++static inline
++int lttng_ftrace_register(const char *name,
++ const char *symbol_name,
++ struct lttng_event *event)
++{
++ return -ENOSYS;
++}
++
++static inline
++void lttng_ftrace_unregister(struct lttng_event *event)
++{
++}
++
++static inline
++void lttng_ftrace_destroy_private(struct lttng_event *event)
++{
++}
++#endif
++
++int lttng_calibrate(struct lttng_kernel_calibrate *calibrate);
++
++extern const struct file_operations lttng_tracepoint_list_fops;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++#define TRACEPOINT_HAS_DATA_ARG
++#endif
++
++#endif /* _LTTNG_EVENTS_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-kernel-version.h
+@@ -0,0 +1,36 @@
++#ifndef _LTTNG_KERNEL_VERSION_H
++#define _LTTNG_KERNEL_VERSION_H
++
++/*
++ * lttng-kernel-version.h
++ *
++ * Contains helpers to check more complex kernel version conditions.
++ *
++ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/version.h>
++
++/*
++ * This macro checks if the kernel version is between the two specified
++ * versions (lower limit inclusive, upper limit exclusive).
++ */
++#define LTTNG_KERNEL_RANGE(a_low, b_low, c_low, a_high, b_high, c_high) \
++ (LINUX_VERSION_CODE >= KERNEL_VERSION(a_low, b_low, c_low) && \
++ LINUX_VERSION_CODE < KERNEL_VERSION(a_high, b_high, c_high))
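++
++/*
++ * Illustrative usage sketch (the version numbers are arbitrary examples):
++ *
++ *   #if LTTNG_KERNEL_RANGE(3,3,0, 3,5,0)
++ *   ... code needed only on kernels >= 3.3.0 and < 3.5.0 ...
++ *   #endif
++ */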
++
++#endif /* _LTTNG_KERNEL_VERSION_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-probes.c
+@@ -0,0 +1,171 @@
++/*
++ * lttng-probes.c
++ *
++ * Holds LTTng probes registry.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/seq_file.h>
++
++#include "lttng-events.h"
++
++static LIST_HEAD(probe_list);
++static DEFINE_MUTEX(probe_mutex);
++
++static
++const struct lttng_event_desc *find_event(const char *name)
++{
++ struct lttng_probe_desc *probe_desc;
++ int i;
++
++ list_for_each_entry(probe_desc, &probe_list, head) {
++ for (i = 0; i < probe_desc->nr_events; i++) {
++ if (!strcmp(probe_desc->event_desc[i]->name, name))
++ return probe_desc->event_desc[i];
++ }
++ }
++ return NULL;
++}
++
++int lttng_probe_register(struct lttng_probe_desc *desc)
++{
++ int ret = 0;
++ int i;
++
++ mutex_lock(&probe_mutex);
++ /*
++ * TODO: This is O(N^2). Turn into a hash table when probe registration
++ * overhead becomes an issue.
++ */
++ for (i = 0; i < desc->nr_events; i++) {
++ if (find_event(desc->event_desc[i]->name)) {
++ ret = -EEXIST;
++ goto end;
++ }
++ }
++ list_add(&desc->head, &probe_list);
++end:
++ mutex_unlock(&probe_mutex);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(lttng_probe_register);
++
++void lttng_probe_unregister(struct lttng_probe_desc *desc)
++{
++ mutex_lock(&probe_mutex);
++ list_del(&desc->head);
++ mutex_unlock(&probe_mutex);
++}
++EXPORT_SYMBOL_GPL(lttng_probe_unregister);
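++
++/*
++ * Sketch of how a probe provider might register its events (all
++ * identifiers below are hypothetical):
++ *
++ *   static const struct lttng_event_desc my_event_desc = {
++ *           .name = "my_subsys_my_event",
++ *           .probe_callback = (void *) &my_probe_callback,
++ *           .fields = my_event_fields,
++ *           .nr_fields = ARRAY_SIZE(my_event_fields),
++ *           .owner = THIS_MODULE,
++ *   };
++ *
++ *   static const struct lttng_event_desc *my_event_descs[] = {
++ *           &my_event_desc,
++ *   };
++ *
++ *   static struct lttng_probe_desc my_probe_desc = {
++ *           .event_desc = my_event_descs,
++ *           .nr_events = ARRAY_SIZE(my_event_descs),
++ *   };
++ *
++ * The provider then calls lttng_probe_register(&my_probe_desc) from its
++ * module init and lttng_probe_unregister(&my_probe_desc) from its module
++ * exit.
++ */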
++
++const struct lttng_event_desc *lttng_event_get(const char *name)
++{
++ const struct lttng_event_desc *event;
++ int ret;
++
++ mutex_lock(&probe_mutex);
++ event = find_event(name);
++ mutex_unlock(&probe_mutex);
++ if (!event)
++ return NULL;
++ ret = try_module_get(event->owner);
++ WARN_ON_ONCE(!ret);
++ return event;
++}
++EXPORT_SYMBOL_GPL(lttng_event_get);
++
++void lttng_event_put(const struct lttng_event_desc *event)
++{
++ module_put(event->owner);
++}
++EXPORT_SYMBOL_GPL(lttng_event_put);
++
++static
++void *tp_list_start(struct seq_file *m, loff_t *pos)
++{
++ struct lttng_probe_desc *probe_desc;
++ int iter = 0, i;
++
++ mutex_lock(&probe_mutex);
++ list_for_each_entry(probe_desc, &probe_list, head) {
++ for (i = 0; i < probe_desc->nr_events; i++) {
++ if (iter++ >= *pos)
++ return (void *) probe_desc->event_desc[i];
++ }
++ }
++ /* End of list */
++ return NULL;
++}
++
++static
++void *tp_list_next(struct seq_file *m, void *p, loff_t *ppos)
++{
++ struct lttng_probe_desc *probe_desc;
++ int iter = 0, i;
++
++ (*ppos)++;
++ list_for_each_entry(probe_desc, &probe_list, head) {
++ for (i = 0; i < probe_desc->nr_events; i++) {
++ if (iter++ >= *ppos)
++ return (void *) probe_desc->event_desc[i];
++ }
++ }
++ /* End of list */
++ return NULL;
++}
++
++static
++void tp_list_stop(struct seq_file *m, void *p)
++{
++ mutex_unlock(&probe_mutex);
++}
++
++static
++int tp_list_show(struct seq_file *m, void *p)
++{
++ const struct lttng_event_desc *probe_desc = p;
++
++ seq_printf(m, "event { name = %s; };\n",
++ probe_desc->name);
++ return 0;
++}
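++
++/*
++ * Example output of the tracepoint list file (event names are examples
++ * only; the actual list depends on the loaded probe modules):
++ *
++ *   event { name = sched_switch; };
++ *   event { name = irq_handler_entry; };
++ */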
++
++static
++const struct seq_operations lttng_tracepoint_list_seq_ops = {
++ .start = tp_list_start,
++ .next = tp_list_next,
++ .stop = tp_list_stop,
++ .show = tp_list_show,
++};
++
++static
++int lttng_tracepoint_list_open(struct inode *inode, struct file *file)
++{
++ return seq_open(file, &lttng_tracepoint_list_seq_ops);
++}
++
++const struct file_operations lttng_tracepoint_list_fops = {
++ .owner = THIS_MODULE,
++ .open = lttng_tracepoint_list_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-ring-buffer-client-discard.c
+@@ -0,0 +1,33 @@
++/*
++ * lttng-ring-buffer-client-discard.c
++ *
++ * LTTng lib ring buffer client (discard mode).
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include "lttng-tracer.h"
++
++#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
++#define RING_BUFFER_MODE_TEMPLATE_STRING "discard"
++#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
++#include "lttng-ring-buffer-client.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-ring-buffer-client-mmap-discard.c
+@@ -0,0 +1,33 @@
++/*
++ * lttng-ring-buffer-client-mmap-discard.c
++ *
++ * LTTng lib ring buffer client (discard mode, mmap output).
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include "lttng-tracer.h"
++
++#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
++#define RING_BUFFER_MODE_TEMPLATE_STRING "discard-mmap"
++#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
++#include "lttng-ring-buffer-client.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-ring-buffer-client-mmap-overwrite.c
+@@ -0,0 +1,33 @@
++/*
++ * lttng-ring-buffer-client-mmap-overwrite.c
++ *
++ * LTTng lib ring buffer client (overwrite mode, mmap output).
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include "lttng-tracer.h"
++
++#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
++#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite-mmap"
++#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
++#include "lttng-ring-buffer-client.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-ring-buffer-client-overwrite.c
+@@ -0,0 +1,33 @@
++/*
++ * lttng-ring-buffer-client-overwrite.c
++ *
++ * LTTng lib ring buffer client (overwrite mode).
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include "lttng-tracer.h"
++
++#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
++#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite"
++#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
++#include "lttng-ring-buffer-client.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-ring-buffer-client.h
+@@ -0,0 +1,600 @@
++/*
++ * lttng-ring-buffer-client.h
++ *
++ * LTTng lib ring buffer client template.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include "lib/bitfield.h"
++#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
++#include "wrapper/trace-clock.h"
++#include "lttng-events.h"
++#include "lttng-tracer.h"
++#include "wrapper/ringbuffer/frontend_types.h"
++
++#define LTTNG_COMPACT_EVENT_BITS 5
++#define LTTNG_COMPACT_TSC_BITS 27
++
++/*
++ * Keep the natural field alignment for _each field_ within this structure if
++ * you ever add/remove a field from this header. Packed attribute is not used
++ * because gcc generates poor code on at least powerpc and mips. Don't ever
++ * let gcc add padding between the structure elements.
++ *
++ * The guarantee we have with timestamps is that all the events in a
++ * packet are included (inclusive) within the begin/end timestamps of
++ * the packet. Another guarantee we have is that the "timestamp begin",
++ * as well as the event timestamps, are monotonically increasing (never
++ * decrease) when moving forward in a stream (physically). But this
++ * guarantee does not apply to "timestamp end", because it is sampled at
++ * commit time, which is not ordered with respect to space reservation.
++ */
++
++struct packet_header {
++ /* Trace packet header */
++ uint32_t magic; /*
++ * Trace magic number.
++ * contains endianness information.
++ */
++ uint8_t uuid[16];
++ uint32_t stream_id;
++
++ struct {
++ /* Stream packet context */
++ uint64_t timestamp_begin; /* Cycle count at subbuffer start */
++ uint64_t timestamp_end; /* Cycle count at subbuffer end */
++ uint64_t content_size; /* Size of data in subbuffer */
++ uint64_t packet_size; /* Subbuffer size (include padding) */
++ unsigned long events_discarded; /*
++ * Events lost in this subbuffer since
++ * the beginning of the trace.
++ * (may overflow)
++ */
++ uint32_t cpu_id; /* CPU id associated with stream */
++ uint8_t header_end; /* End of header */
++ } ctx;
++};
++
++
++static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
++{
++ return trace_clock_read64();
++}
++
++static inline
++size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
++{
++ int i;
++ size_t orig_offset = offset;
++
++ if (likely(!ctx))
++ return 0;
++ for (i = 0; i < ctx->nr_fields; i++)
++ offset += ctx->fields[i].get_size(offset);
++ return offset - orig_offset;
++}
++
++static inline
++void ctx_record(struct lib_ring_buffer_ctx *bufctx,
++ struct lttng_channel *chan,
++ struct lttng_ctx *ctx)
++{
++ int i;
++
++ if (likely(!ctx))
++ return;
++ for (i = 0; i < ctx->nr_fields; i++)
++ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
++}
++
++/*
++ * record_header_size - Calculate the header size and padding necessary.
++ * @config: ring buffer instance configuration
++ * @chan: channel
++ * @offset: offset in the write buffer
++ * @pre_header_padding: padding to add before the header (output)
++ * @ctx: reservation context
++ *
++ * Returns the event header size (including padding).
++ *
++ * The payload must itself determine its own alignment from the biggest type it
++ * contains.
++ */
++static __inline__
++unsigned char record_header_size(const struct lib_ring_buffer_config *config,
++ struct channel *chan, size_t offset,
++ size_t *pre_header_padding,
++ struct lib_ring_buffer_ctx *ctx)
++{
++ struct lttng_channel *lttng_chan = channel_get_private(chan);
++ struct lttng_event *event = ctx->priv;
++ size_t orig_offset = offset;
++ size_t padding;
++
++ switch (lttng_chan->header_type) {
++ case 1: /* compact */
++ padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
++ offset += padding;
++ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
++ offset += sizeof(uint32_t); /* id and timestamp */
++ } else {
++ /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
++ offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
++ /* Align extended struct on largest member */
++ offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
++ offset += sizeof(uint32_t); /* id */
++ offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
++ offset += sizeof(uint64_t); /* timestamp */
++ }
++ break;
++ case 2: /* large */
++ padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
++ offset += padding;
++ offset += sizeof(uint16_t);
++ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
++ offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
++ offset += sizeof(uint32_t); /* timestamp */
++ } else {
++ /* Align extended struct on largest member */
++ offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
++ offset += sizeof(uint32_t); /* id */
++ offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
++ offset += sizeof(uint64_t); /* timestamp */
++ }
++ break;
++ default:
++ padding = 0;
++ WARN_ON_ONCE(1);
++ }
++ offset += ctx_get_size(offset, event->ctx);
++ offset += ctx_get_size(offset, lttng_chan->ctx);
++
++ *pre_header_padding = padding;
++ return offset - orig_offset;
++}
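++
++/*
++ * Worked example (assumptions: compact header type, offset already aligned
++ * on 4 bytes, no RING_BUFFER_RFLAG_FULL_TSC or LTTNG_RFLAG_EXTENDED flag,
++ * and no context fields): no padding is needed and a single 32-bit word is
++ * reserved for the 5-bit event id plus the 27-bit timestamp, so the
++ * function returns 4.
++ */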
++
++#include "wrapper/ringbuffer/api.h"
++
++static
++void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_ctx *ctx,
++ uint32_t event_id);
++
++/*
++ * lttng_write_event_header
++ *
++ * Writes the event header to the offset (already aligned on 32-bits).
++ *
++ * @config: ring buffer instance configuration
++ * @ctx: reservation context
++ * @event_id: event ID
++ */
++static __inline__
++void lttng_write_event_header(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_ctx *ctx,
++ uint32_t event_id)
++{
++ struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
++ struct lttng_event *event = ctx->priv;
++
++ if (unlikely(ctx->rflags))
++ goto slow_path;
++
++ switch (lttng_chan->header_type) {
++ case 1: /* compact */
++ {
++ uint32_t id_time = 0;
++
++ bt_bitfield_write(&id_time, uint32_t,
++ 0,
++ LTTNG_COMPACT_EVENT_BITS,
++ event_id);
++ bt_bitfield_write(&id_time, uint32_t,
++ LTTNG_COMPACT_EVENT_BITS,
++ LTTNG_COMPACT_TSC_BITS,
++ ctx->tsc);
++ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
++ break;
++ }
++ case 2: /* large */
++ {
++ uint32_t timestamp = (uint32_t) ctx->tsc;
++ uint16_t id = event_id;
++
++ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
++ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
++ break;
++ }
++ default:
++ WARN_ON_ONCE(1);
++ }
++
++ ctx_record(ctx, lttng_chan, lttng_chan->ctx);
++ ctx_record(ctx, lttng_chan, event->ctx);
++ lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
++
++ return;
++
++slow_path:
++ lttng_write_event_header_slow(config, ctx, event_id);
++}
++
++static
++void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
++ struct lib_ring_buffer_ctx *ctx,
++ uint32_t event_id)
++{
++ struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
++ struct lttng_event *event = ctx->priv;
++
++ switch (lttng_chan->header_type) {
++ case 1: /* compact */
++ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
++ uint32_t id_time = 0;
++
++ bt_bitfield_write(&id_time, uint32_t,
++ 0,
++ LTTNG_COMPACT_EVENT_BITS,
++ event_id);
++ bt_bitfield_write(&id_time, uint32_t,
++ LTTNG_COMPACT_EVENT_BITS,
++ LTTNG_COMPACT_TSC_BITS, ctx->tsc);
++ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
++ } else {
++ uint8_t id = 0;
++ uint64_t timestamp = ctx->tsc;
++
++ bt_bitfield_write(&id, uint8_t,
++ 0,
++ LTTNG_COMPACT_EVENT_BITS,
++ 31);
++ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
++ /* Align extended struct on largest member */
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
++ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
++ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
++ }
++ break;
++ case 2: /* large */
++ {
++ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
++ uint32_t timestamp = (uint32_t) ctx->tsc;
++ uint16_t id = event_id;
++
++ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
++ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
++ } else {
++ uint16_t id = 65535;
++ uint64_t timestamp = ctx->tsc;
++
++ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
++ /* Align extended struct on largest member */
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
++ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
++ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
++ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
++ }
++ break;
++ }
++ default:
++ WARN_ON_ONCE(1);
++ }
++ ctx_record(ctx, lttng_chan, lttng_chan->ctx);
++ ctx_record(ctx, lttng_chan, event->ctx);
++ lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
++}
++
++static const struct lib_ring_buffer_config client_config;
++
++static u64 client_ring_buffer_clock_read(struct channel *chan)
++{
++ return lib_ring_buffer_clock_read(chan);
++}
++
++static
++size_t client_record_header_size(const struct lib_ring_buffer_config *config,
++ struct channel *chan, size_t offset,
++ size_t *pre_header_padding,
++ struct lib_ring_buffer_ctx *ctx)
++{
++ return record_header_size(config, chan, offset,
++ pre_header_padding, ctx);
++}
++
++/**
++ * client_packet_header_size - called on buffer-switch to a new sub-buffer
++ *
++ * Return header size without padding after the structure. Don't use packed
++ * structure because gcc generates inefficient code on some architectures
++ * (powerpc, mips..)
++ */
++static size_t client_packet_header_size(void)
++{
++ return offsetof(struct packet_header, ctx.header_end);
++}
++
++static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
++ unsigned int subbuf_idx)
++{
++ struct channel *chan = buf->backend.chan;
++ struct packet_header *header =
++ (struct packet_header *)
++ lib_ring_buffer_offset_address(&buf->backend,
++ subbuf_idx * chan->backend.subbuf_size);
++ struct lttng_channel *lttng_chan = channel_get_private(chan);
++ struct lttng_session *session = lttng_chan->session;
++
++ header->magic = CTF_MAGIC_NUMBER;
++ memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
++ header->stream_id = lttng_chan->id;
++ header->ctx.timestamp_begin = tsc;
++ header->ctx.timestamp_end = 0;
++ header->ctx.content_size = ~0ULL; /* for debugging */
++ header->ctx.packet_size = ~0ULL;
++ header->ctx.events_discarded = 0;
++ header->ctx.cpu_id = buf->backend.cpu;
++}
++
++/*
++ * offset is assumed to never be 0 here: never deliver a completely empty
++ * subbuffer. data_size is between 1 and subbuf_size.
++ */
++static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
++ unsigned int subbuf_idx, unsigned long data_size)
++{
++ struct channel *chan = buf->backend.chan;
++ struct packet_header *header =
++ (struct packet_header *)
++ lib_ring_buffer_offset_address(&buf->backend,
++ subbuf_idx * chan->backend.subbuf_size);
++ unsigned long records_lost = 0;
++
++ header->ctx.timestamp_end = tsc;
++ header->ctx.content_size =
++ (uint64_t) data_size * CHAR_BIT; /* in bits */
++ header->ctx.packet_size =
++ (uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
++ records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
++ records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
++ records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
++ header->ctx.events_discarded = records_lost;
++}
++
++static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
++ int cpu, const char *name)
++{
++ return 0;
++}
++
++static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
++{
++}
++
++static const struct lib_ring_buffer_config client_config = {
++ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
++ .cb.record_header_size = client_record_header_size,
++ .cb.subbuffer_header_size = client_packet_header_size,
++ .cb.buffer_begin = client_buffer_begin,
++ .cb.buffer_end = client_buffer_end,
++ .cb.buffer_create = client_buffer_create,
++ .cb.buffer_finalize = client_buffer_finalize,
++
++ .tsc_bits = LTTNG_COMPACT_TSC_BITS,
++ .alloc = RING_BUFFER_ALLOC_PER_CPU,
++ .sync = RING_BUFFER_SYNC_PER_CPU,
++ .mode = RING_BUFFER_MODE_TEMPLATE,
++ .backend = RING_BUFFER_PAGE,
++ .output = RING_BUFFER_OUTPUT_TEMPLATE,
++ .oops = RING_BUFFER_OOPS_CONSISTENCY,
++ .ipi = RING_BUFFER_IPI_BARRIER,
++ .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
++};
++
++static
++struct channel *_channel_create(const char *name,
++ struct lttng_channel *lttng_chan, void *buf_addr,
++ size_t subbuf_size, size_t num_subbuf,
++ unsigned int switch_timer_interval,
++ unsigned int read_timer_interval)
++{
++ return channel_create(&client_config, name, lttng_chan, buf_addr,
++ subbuf_size, num_subbuf, switch_timer_interval,
++ read_timer_interval);
++}
++
++static
++void lttng_channel_destroy(struct channel *chan)
++{
++ channel_destroy(chan);
++}
++
++static
++struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
++{
++ struct lib_ring_buffer *buf;
++ int cpu;
++
++ for_each_channel_cpu(cpu, chan) {
++ buf = channel_get_ring_buffer(&client_config, chan, cpu);
++ if (!lib_ring_buffer_open_read(buf))
++ return buf;
++ }
++ return NULL;
++}
++
++static
++int lttng_buffer_has_read_closed_stream(struct channel *chan)
++{
++ struct lib_ring_buffer *buf;
++ int cpu;
++
++ for_each_channel_cpu(cpu, chan) {
++ buf = channel_get_ring_buffer(&client_config, chan, cpu);
++ if (!atomic_long_read(&buf->active_readers))
++ return 1;
++ }
++ return 0;
++}
++
++static
++void lttng_buffer_read_close(struct lib_ring_buffer *buf)
++{
++ lib_ring_buffer_release_read(buf);
++}
++
++static
++int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
++ uint32_t event_id)
++{
++ struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
++ int ret, cpu;
++
++ cpu = lib_ring_buffer_get_cpu(&client_config);
++ if (cpu < 0)
++ return -EPERM;
++ ctx->cpu = cpu;
++
++ switch (lttng_chan->header_type) {
++ case 1: /* compact */
++ if (event_id > 30)
++ ctx->rflags |= LTTNG_RFLAG_EXTENDED;
++ break;
++ case 2: /* large */
++ if (event_id > 65534)
++ ctx->rflags |= LTTNG_RFLAG_EXTENDED;
++ break;
++ default:
++ WARN_ON_ONCE(1);
++ }
++
++ ret = lib_ring_buffer_reserve(&client_config, ctx);
++ if (ret)
++ goto put;
++ lttng_write_event_header(&client_config, ctx, event_id);
++ return 0;
++put:
++ lib_ring_buffer_put_cpu(&client_config);
++ return ret;
++}
++
++static
++void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
++{
++ lib_ring_buffer_commit(&client_config, ctx);
++ lib_ring_buffer_put_cpu(&client_config);
++}
++
++static
++void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
++ size_t len)
++{
++ lib_ring_buffer_write(&client_config, ctx, src, len);
++}
++
++static
++void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
++ const void __user *src, size_t len)
++{
++ lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
++}
++
++static
++void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
++ int c, size_t len)
++{
++ lib_ring_buffer_memset(&client_config, ctx, c, len);
++}
++
++static
++wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
++{
++ struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
++ chan, cpu);
++ return &buf->write_wait;
++}
++
++static
++wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
++{
++ return &chan->hp_wait;
++}
++
++static
++int lttng_is_finalized(struct channel *chan)
++{
++ return lib_ring_buffer_channel_is_finalized(chan);
++}
++
++static
++int lttng_is_disabled(struct channel *chan)
++{
++ return lib_ring_buffer_channel_is_disabled(chan);
++}
++
++static struct lttng_transport lttng_relay_transport = {
++ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
++ .owner = THIS_MODULE,
++ .ops = {
++ .channel_create = _channel_create,
++ .channel_destroy = lttng_channel_destroy,
++ .buffer_read_open = lttng_buffer_read_open,
++ .buffer_has_read_closed_stream =
++ lttng_buffer_has_read_closed_stream,
++ .buffer_read_close = lttng_buffer_read_close,
++ .event_reserve = lttng_event_reserve,
++ .event_commit = lttng_event_commit,
++ .event_write = lttng_event_write,
++ .event_write_from_user = lttng_event_write_from_user,
++ .event_memset = lttng_event_memset,
++ .packet_avail_size = NULL, /* Would be racy anyway */
++ .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
++ .get_hp_wait_queue = lttng_get_hp_wait_queue,
++ .is_finalized = lttng_is_finalized,
++ .is_disabled = lttng_is_disabled,
++ },
++};
++
++static int __init lttng_ring_buffer_client_init(void)
++{
++ /*
++	 * This vmalloc_sync_all() also takes care of the lib ring buffer's
++	 * vmalloc'd module pages when it is built as a module into LTTng.
++ */
++ wrapper_vmalloc_sync_all();
++ lttng_transport_register(&lttng_relay_transport);
++ return 0;
++}
++
++module_init(lttng_ring_buffer_client_init);
++
++static void __exit lttng_ring_buffer_client_exit(void)
++{
++ lttng_transport_unregister(&lttng_relay_transport);
++}
++
++module_exit(lttng_ring_buffer_client_exit);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
++ " client");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-ring-buffer-metadata-client.c
+@@ -0,0 +1,33 @@
++/*
++ * lttng-ring-buffer-metadata-client.c
++ *
++ * LTTng lib ring buffer metadata client.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include "lttng-tracer.h"
++
++#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
++#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata"
++#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
++#include "lttng-ring-buffer-metadata-client.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-ring-buffer-metadata-client.h
+@@ -0,0 +1,342 @@
++/*
++ * lttng-ring-buffer-metadata-client.h
++ *
++ * LTTng lib ring buffer metadata client template.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
++#include "lttng-events.h"
++#include "lttng-tracer.h"
++
++struct metadata_packet_header {
++ uint32_t magic; /* 0x75D11D57 */
++ uint8_t uuid[16]; /* Unique Universal Identifier */
++ uint32_t checksum; /* 0 if unused */
++ uint32_t content_size; /* in bits */
++ uint32_t packet_size; /* in bits */
++ uint8_t compression_scheme; /* 0 if unused */
++ uint8_t encryption_scheme; /* 0 if unused */
++ uint8_t checksum_scheme; /* 0 if unused */
++ uint8_t major; /* CTF spec major version number */
++ uint8_t minor; /* CTF spec minor version number */
++ uint8_t header_end[0];
++};
++
++struct metadata_record_header {
++ uint8_t header_end[0]; /* End of header */
++};
++
++static const struct lib_ring_buffer_config client_config;
++
++static inline
++u64 lib_ring_buffer_clock_read(struct channel *chan)
++{
++ return 0;
++}
++
++static inline
++unsigned char record_header_size(const struct lib_ring_buffer_config *config,
++ struct channel *chan, size_t offset,
++ size_t *pre_header_padding,
++ struct lib_ring_buffer_ctx *ctx)
++{
++ return 0;
++}
++
++#include "wrapper/ringbuffer/api.h"
++
++static u64 client_ring_buffer_clock_read(struct channel *chan)
++{
++ return 0;
++}
++
++static
++size_t client_record_header_size(const struct lib_ring_buffer_config *config,
++ struct channel *chan, size_t offset,
++ size_t *pre_header_padding,
++ struct lib_ring_buffer_ctx *ctx)
++{
++ return 0;
++}
++
++/**
++ * client_packet_header_size - called on buffer-switch to a new sub-buffer
++ *
++ * Return header size without padding after the structure. Don't use packed
++ * structure because gcc generates inefficient code on some architectures
++ * (powerpc, mips..)
++ */
++static size_t client_packet_header_size(void)
++{
++ return offsetof(struct metadata_packet_header, header_end);
++}
++
++static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
++ unsigned int subbuf_idx)
++{
++ struct channel *chan = buf->backend.chan;
++ struct metadata_packet_header *header =
++ (struct metadata_packet_header *)
++ lib_ring_buffer_offset_address(&buf->backend,
++ subbuf_idx * chan->backend.subbuf_size);
++ struct lttng_channel *lttng_chan = channel_get_private(chan);
++ struct lttng_session *session = lttng_chan->session;
++
++ header->magic = TSDL_MAGIC_NUMBER;
++ memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
++ header->checksum = 0; /* 0 if unused */
++ header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
++ header->packet_size = 0xFFFFFFFF; /* in bits, for debugging */
++ header->compression_scheme = 0; /* 0 if unused */
++ header->encryption_scheme = 0; /* 0 if unused */
++ header->checksum_scheme = 0; /* 0 if unused */
++ header->major = CTF_SPEC_MAJOR;
++ header->minor = CTF_SPEC_MINOR;
++}
++
++/*
++ * offset is assumed to never be 0 here: never deliver a completely empty
++ * subbuffer. data_size is between 1 and subbuf_size.
++ */
++static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
++ unsigned int subbuf_idx, unsigned long data_size)
++{
++ struct channel *chan = buf->backend.chan;
++ struct metadata_packet_header *header =
++ (struct metadata_packet_header *)
++ lib_ring_buffer_offset_address(&buf->backend,
++ subbuf_idx * chan->backend.subbuf_size);
++ unsigned long records_lost = 0;
++
++ header->content_size = data_size * CHAR_BIT; /* in bits */
++ header->packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
++ /*
++ * We do not care about the records lost count, because the metadata
++	 * channel waits and retries.
++ */
++ (void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
++ records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
++ records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
++ WARN_ON_ONCE(records_lost != 0);
++}
++
++static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
++ int cpu, const char *name)
++{
++ return 0;
++}
++
++static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
++{
++}
++
++static const struct lib_ring_buffer_config client_config = {
++ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
++ .cb.record_header_size = client_record_header_size,
++ .cb.subbuffer_header_size = client_packet_header_size,
++ .cb.buffer_begin = client_buffer_begin,
++ .cb.buffer_end = client_buffer_end,
++ .cb.buffer_create = client_buffer_create,
++ .cb.buffer_finalize = client_buffer_finalize,
++
++ .tsc_bits = 0,
++ .alloc = RING_BUFFER_ALLOC_GLOBAL,
++ .sync = RING_BUFFER_SYNC_GLOBAL,
++ .mode = RING_BUFFER_MODE_TEMPLATE,
++ .backend = RING_BUFFER_PAGE,
++ .output = RING_BUFFER_OUTPUT_TEMPLATE,
++ .oops = RING_BUFFER_OOPS_CONSISTENCY,
++ .ipi = RING_BUFFER_IPI_BARRIER,
++ .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
++};
++
++static
++struct channel *_channel_create(const char *name,
++ struct lttng_channel *lttng_chan, void *buf_addr,
++ size_t subbuf_size, size_t num_subbuf,
++ unsigned int switch_timer_interval,
++ unsigned int read_timer_interval)
++{
++ return channel_create(&client_config, name, lttng_chan, buf_addr,
++ subbuf_size, num_subbuf, switch_timer_interval,
++ read_timer_interval);
++}
++
++static
++void lttng_channel_destroy(struct channel *chan)
++{
++ channel_destroy(chan);
++}
++
++static
++struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
++{
++ struct lib_ring_buffer *buf;
++
++ buf = channel_get_ring_buffer(&client_config, chan, 0);
++ if (!lib_ring_buffer_open_read(buf))
++ return buf;
++ return NULL;
++}
++
++static
++int lttng_buffer_has_read_closed_stream(struct channel *chan)
++{
++ struct lib_ring_buffer *buf;
++ int cpu;
++
++ for_each_channel_cpu(cpu, chan) {
++ buf = channel_get_ring_buffer(&client_config, chan, cpu);
++ if (!atomic_long_read(&buf->active_readers))
++ return 1;
++ }
++ return 0;
++}
++
++static
++void lttng_buffer_read_close(struct lib_ring_buffer *buf)
++{
++ lib_ring_buffer_release_read(buf);
++}
++
++static
++int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
++{
++ return lib_ring_buffer_reserve(&client_config, ctx);
++}
++
++static
++void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
++{
++ lib_ring_buffer_commit(&client_config, ctx);
++}
++
++static
++void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
++ size_t len)
++{
++ lib_ring_buffer_write(&client_config, ctx, src, len);
++}
++
++static
++void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
++ const void __user *src, size_t len)
++{
++ lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
++}
++
++static
++void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
++ int c, size_t len)
++{
++ lib_ring_buffer_memset(&client_config, ctx, c, len);
++}
++
++static
++size_t lttng_packet_avail_size(struct channel *chan)
++
++{
++ unsigned long o_begin;
++ struct lib_ring_buffer *buf;
++
++ buf = chan->backend.buf; /* Only for global buffer ! */
++ o_begin = v_read(&client_config, &buf->offset);
++ if (subbuf_offset(o_begin, chan) != 0) {
++ return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
++ } else {
++ return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
++ - sizeof(struct metadata_packet_header);
++ }
++}
++
++static
++wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
++{
++ struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
++ chan, cpu);
++ return &buf->write_wait;
++}
++
++static
++wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
++{
++ return &chan->hp_wait;
++}
++
++static
++int lttng_is_finalized(struct channel *chan)
++{
++ return lib_ring_buffer_channel_is_finalized(chan);
++}
++
++static
++int lttng_is_disabled(struct channel *chan)
++{
++ return lib_ring_buffer_channel_is_disabled(chan);
++}
++
++static struct lttng_transport lttng_relay_transport = {
++ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
++ .owner = THIS_MODULE,
++ .ops = {
++ .channel_create = _channel_create,
++ .channel_destroy = lttng_channel_destroy,
++ .buffer_read_open = lttng_buffer_read_open,
++ .buffer_has_read_closed_stream =
++ lttng_buffer_has_read_closed_stream,
++ .buffer_read_close = lttng_buffer_read_close,
++ .event_reserve = lttng_event_reserve,
++ .event_commit = lttng_event_commit,
++ .event_write_from_user = lttng_event_write_from_user,
++ .event_memset = lttng_event_memset,
++ .event_write = lttng_event_write,
++ .packet_avail_size = lttng_packet_avail_size,
++ .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
++ .get_hp_wait_queue = lttng_get_hp_wait_queue,
++ .is_finalized = lttng_is_finalized,
++ .is_disabled = lttng_is_disabled,
++ },
++};
++
++static int __init lttng_ring_buffer_client_init(void)
++{
++ /*
++ * This vmalloc sync all also takes care of the lib ring buffer
++ * vmalloc'd module pages when it is built as a module into LTTng.
++ */
++ wrapper_vmalloc_sync_all();
++ lttng_transport_register(&lttng_relay_transport);
++ return 0;
++}
++
++module_init(lttng_ring_buffer_client_init);
++
++static void __exit lttng_ring_buffer_client_exit(void)
++{
++ lttng_transport_unregister(&lttng_relay_transport);
++}
++
++module_exit(lttng_ring_buffer_client_exit);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
++ " client");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-ring-buffer-metadata-mmap-client.c
+@@ -0,0 +1,33 @@
++/*
++ * lttng-ring-buffer-metadata-mmap-client.c
++ *
++ * LTTng lib ring buffer metadata client.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include "lttng-tracer.h"
++
++#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
++#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata-mmap"
++#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
++#include "lttng-ring-buffer-metadata-client.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-statedump-impl.c
+@@ -0,0 +1,427 @@
++/*
++ * lttng-statedump-impl.c
++ *
++ * Linux Trace Toolkit Next Generation Kernel State Dump
++ *
++ * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
++ * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ *
++ * Changes:
++ * Eric Clement: Add listing of network IP interface
++ * 2006, 2007 Mathieu Desnoyers Fix kernel threads
++ * Various updates
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/netlink.h>
++#include <linux/inet.h>
++#include <linux/ip.h>
++#include <linux/kthread.h>
++#include <linux/proc_fs.h>
++#include <linux/file.h>
++#include <linux/interrupt.h>
++#include <linux/irqnr.h>
++#include <linux/cpu.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/fdtable.h>
++#include <linux/swap.h>
++#include <linux/wait.h>
++#include <linux/mutex.h>
++
++#include "lttng-events.h"
++#include "wrapper/irqdesc.h"
++#include "wrapper/spinlock.h"
++#include "wrapper/fdtable.h"
++#include "wrapper/nsproxy.h"
++#include "wrapper/irq.h"
++
++#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
++#include <linux/irq.h>
++#endif
++
++/* Define the tracepoints, but do not build the probes */
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++#define TRACE_INCLUDE_FILE lttng-statedump
++#include "instrumentation/events/lttng-module/lttng-statedump.h"
++
++struct lttng_fd_ctx {
++ char *page;
++ struct lttng_session *session;
++ struct task_struct *p;
++};
++
++/*
++ * Protected by the trace lock.
++ */
++static struct delayed_work cpu_work[NR_CPUS];
++static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
++static atomic_t kernel_threads_to_run;
++
++enum lttng_thread_type {
++ LTTNG_USER_THREAD = 0,
++ LTTNG_KERNEL_THREAD = 1,
++};
++
++enum lttng_execution_mode {
++ LTTNG_USER_MODE = 0,
++ LTTNG_SYSCALL = 1,
++ LTTNG_TRAP = 2,
++ LTTNG_IRQ = 3,
++ LTTNG_SOFTIRQ = 4,
++ LTTNG_MODE_UNKNOWN = 5,
++};
++
++enum lttng_execution_submode {
++ LTTNG_NONE = 0,
++ LTTNG_UNKNOWN = 1,
++};
++
++enum lttng_process_status {
++ LTTNG_UNNAMED = 0,
++ LTTNG_WAIT_FORK = 1,
++ LTTNG_WAIT_CPU = 2,
++ LTTNG_EXIT = 3,
++ LTTNG_ZOMBIE = 4,
++ LTTNG_WAIT = 5,
++ LTTNG_RUN = 6,
++ LTTNG_DEAD = 7,
++};
++
++#ifdef CONFIG_INET
++static
++void lttng_enumerate_device(struct lttng_session *session,
++ struct net_device *dev)
++{
++ struct in_device *in_dev;
++ struct in_ifaddr *ifa;
++
++ if (dev->flags & IFF_UP) {
++ in_dev = in_dev_get(dev);
++ if (in_dev) {
++ for (ifa = in_dev->ifa_list; ifa != NULL;
++ ifa = ifa->ifa_next) {
++ trace_lttng_statedump_network_interface(
++ session, dev, ifa);
++ }
++ in_dev_put(in_dev);
++ }
++ } else {
++ trace_lttng_statedump_network_interface(
++ session, dev, NULL);
++ }
++}
++
++static
++int lttng_enumerate_network_ip_interface(struct lttng_session *session)
++{
++ struct net_device *dev;
++
++ read_lock(&dev_base_lock);
++ for_each_netdev(&init_net, dev)
++ lttng_enumerate_device(session, dev);
++ read_unlock(&dev_base_lock);
++
++ return 0;
++}
++#else /* CONFIG_INET */
++static inline
++int lttng_enumerate_network_ip_interface(struct lttng_session *session)
++{
++ return 0;
++}
++#endif /* CONFIG_INET */
++
++static
++int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
++{
++ const struct lttng_fd_ctx *ctx = p;
++ const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
++
++ if (IS_ERR(s)) {
++ struct dentry *dentry = file->f_path.dentry;
++
++ /* Make sure we give at least some info */
++ spin_lock(&dentry->d_lock);
++ trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd,
++ dentry->d_name.name);
++ spin_unlock(&dentry->d_lock);
++ goto end;
++ }
++ trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd, s);
++end:
++ return 0;
++}
++
++static
++void lttng_enumerate_task_fd(struct lttng_session *session,
++ struct task_struct *p, char *tmp)
++{
++ struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .p = p };
++
++ task_lock(p);
++ lttng_iterate_fd(p->files, 0, lttng_dump_one_fd, &ctx);
++ task_unlock(p);
++}
++
++static
++int lttng_enumerate_file_descriptors(struct lttng_session *session)
++{
++ struct task_struct *p;
++ char *tmp = (char *) __get_free_page(GFP_KERNEL);
++
++ /* Enumerate active file descriptors */
++ rcu_read_lock();
++ for_each_process(p)
++ lttng_enumerate_task_fd(session, p, tmp);
++ rcu_read_unlock();
++ free_page((unsigned long) tmp);
++ return 0;
++}
++
++#if 0
++/*
++ * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
++ * (scheduling in atomic). Normally, the tasklist lock protects this kind of
++ * iteration, but it is not exported to modules.
++ */
++static
++void lttng_enumerate_task_vm_maps(struct lttng_session *session,
++ struct task_struct *p)
++{
++ struct mm_struct *mm;
++ struct vm_area_struct *map;
++ unsigned long ino;
++
++ /* get_task_mm does a task_lock... */
++ mm = get_task_mm(p);
++ if (!mm)
++ return;
++
++ map = mm->mmap;
++ if (map) {
++ down_read(&mm->mmap_sem);
++ while (map) {
++ if (map->vm_file)
++ ino = map->vm_file->f_dentry->d_inode->i_ino;
++ else
++ ino = 0;
++ trace_lttng_statedump_vm_map(session, p, map, ino);
++ map = map->vm_next;
++ }
++ up_read(&mm->mmap_sem);
++ }
++ mmput(mm);
++}
++
++static
++int lttng_enumerate_vm_maps(struct lttng_session *session)
++{
++ struct task_struct *p;
++
++ rcu_read_lock();
++ for_each_process(p)
++ lttng_enumerate_task_vm_maps(session, p);
++ rcu_read_unlock();
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
++#define irq_desc_get_chip(desc) get_irq_desc_chip(desc)
++#endif
++
++static
++void lttng_list_interrupts(struct lttng_session *session)
++{
++ unsigned int irq;
++ unsigned long flags = 0;
++ struct irq_desc *desc;
++
++#define irq_to_desc wrapper_irq_to_desc
++ /* needs irq_desc */
++ for_each_irq_desc(irq, desc) {
++ struct irqaction *action;
++ const char *irq_chip_name =
++ irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";
++
++ local_irq_save(flags);
++ wrapper_desc_spin_lock(&desc->lock);
++ for (action = desc->action; action; action = action->next) {
++ trace_lttng_statedump_interrupt(session,
++ irq, irq_chip_name, action);
++ }
++ wrapper_desc_spin_unlock(&desc->lock);
++ local_irq_restore(flags);
++ }
++#undef irq_to_desc
++}
++#else
++static inline
++void lttng_list_interrupts(struct lttng_session *session)
++{
++}
++#endif
++
++static
++void lttng_statedump_process_ns(struct lttng_session *session,
++ struct task_struct *p,
++ enum lttng_thread_type type,
++ enum lttng_execution_mode mode,
++ enum lttng_execution_submode submode,
++ enum lttng_process_status status)
++{
++ struct nsproxy *proxy;
++ struct pid_namespace *pid_ns;
++
++ rcu_read_lock();
++ proxy = task_nsproxy(p);
++ if (proxy) {
++ pid_ns = lttng_get_proxy_pid_ns(proxy);
++ do {
++ trace_lttng_statedump_process_state(session,
++ p, type, mode, submode, status, pid_ns);
++ pid_ns = pid_ns->parent;
++ } while (pid_ns);
++ } else {
++ trace_lttng_statedump_process_state(session,
++ p, type, mode, submode, status, NULL);
++ }
++ rcu_read_unlock();
++}
++
++static
++int lttng_enumerate_process_states(struct lttng_session *session)
++{
++ struct task_struct *g, *p;
++
++ rcu_read_lock();
++ for_each_process(g) {
++ p = g;
++ do {
++ enum lttng_execution_mode mode =
++ LTTNG_MODE_UNKNOWN;
++ enum lttng_execution_submode submode =
++ LTTNG_UNKNOWN;
++ enum lttng_process_status status;
++ enum lttng_thread_type type;
++
++ task_lock(p);
++ if (p->exit_state == EXIT_ZOMBIE)
++ status = LTTNG_ZOMBIE;
++ else if (p->exit_state == EXIT_DEAD)
++ status = LTTNG_DEAD;
++ else if (p->state == TASK_RUNNING) {
++ /* Is this a forked child that has not run yet? */
++ if (list_empty(&p->rt.run_list))
++ status = LTTNG_WAIT_FORK;
++ else
++ /*
++ * All tasks are considered as wait_cpu;
++ * the viewer will sort out if the task
++ * was really running at this time.
++ */
++ status = LTTNG_WAIT_CPU;
++ } else if (p->state &
++ (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
++ /* Task is waiting for something to complete */
++ status = LTTNG_WAIT;
++ } else
++ status = LTTNG_UNNAMED;
++ submode = LTTNG_NONE;
++
++ /*
++			 * Checking p->mm filters out kernel threads;
++			 * the viewer will further sort out whether a
++			 * user-space thread was in syscall mode or not.
++ */
++ if (p->mm)
++ type = LTTNG_USER_THREAD;
++ else
++ type = LTTNG_KERNEL_THREAD;
++ lttng_statedump_process_ns(session,
++ p, type, mode, submode, status);
++ task_unlock(p);
++ } while_each_thread(g, p);
++ }
++ rcu_read_unlock();
++
++ return 0;
++}
++
++static
++void lttng_statedump_work_func(struct work_struct *work)
++{
++ if (atomic_dec_and_test(&kernel_threads_to_run))
++ /* If we are the last thread, wake up do_lttng_statedump */
++ wake_up(&statedump_wq);
++}
++
++static
++int do_lttng_statedump(struct lttng_session *session)
++{
++ int cpu;
++
++ trace_lttng_statedump_start(session);
++ lttng_enumerate_process_states(session);
++ lttng_enumerate_file_descriptors(session);
++ /* FIXME lttng_enumerate_vm_maps(session); */
++ lttng_list_interrupts(session);
++ lttng_enumerate_network_ip_interface(session);
++
++ /* TODO lttng_dump_idt_table(session); */
++ /* TODO lttng_dump_softirq_vec(session); */
++ /* TODO lttng_list_modules(session); */
++ /* TODO lttng_dump_swap_files(session); */
++
++ /*
++ * Fire off a work queue on each CPU. Their sole purpose in life
++ * is to guarantee that each CPU has been in a state where is was in
++	 * is to guarantee that each CPU has been in a state where it was in
++ */
++ get_online_cpus();
++ atomic_set(&kernel_threads_to_run, num_online_cpus());
++ for_each_online_cpu(cpu) {
++ INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
++ schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
++ }
++ /* Wait for all threads to run */
++ __wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
++ put_online_cpus();
++ /* Our work is done */
++ trace_lttng_statedump_end(session);
++ return 0;
++}
++
++/*
++ * Called with session mutex held.
++ */
++int lttng_statedump_start(struct lttng_session *session)
++{
++ return do_lttng_statedump(session);
++}
++EXPORT_SYMBOL_GPL(lttng_statedump_start);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Jean-Hugues Deschenes");
++MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Statedump");
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-syscalls.c
+@@ -0,0 +1,459 @@
++/*
++ * lttng-syscalls.c
++ *
++ * LTTng syscall probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/compat.h>
++#include <asm/ptrace.h>
++#include <asm/syscall.h>
++
++#include "wrapper/tracepoint.h"
++#include "lttng-events.h"
++
++#ifndef CONFIG_COMPAT
++# ifndef is_compat_task
++# define is_compat_task() (0)
++# endif
++#endif
++
++static
++void syscall_entry_probe(void *__data, struct pt_regs *regs, long id);
++
++/*
++ * Forward declarations for old kernels.
++ */
++struct mmsghdr;
++struct rlimit64;
++struct oldold_utsname;
++struct old_utsname;
++struct sel_arg_struct;
++struct mmap_arg_struct;
++
++/*
++ * Take care of NOARGS not supported by mainline.
++ */
++#define DECLARE_EVENT_CLASS_NOARGS(name, tstruct, assign, print)
++#define DEFINE_EVENT_NOARGS(template, name)
++#define TRACE_EVENT_NOARGS(name, struct, assign, print)
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TP_MODULE_NOINIT
++#define TRACE_INCLUDE_PATH ../instrumentation/syscalls/headers
++
++#define PARAMS(args...) args
++
++/* Hijack probe callback for system calls */
++#undef TP_PROBE_CB
++#define TP_PROBE_CB(_template) &syscall_entry_probe
++#define SC_TRACE_EVENT(_name, _proto, _args, _struct, _assign, _printk) \
++ TRACE_EVENT(_name, PARAMS(_proto), PARAMS(_args),\
++ PARAMS(_struct), PARAMS(_assign), PARAMS(_printk))
++#define SC_DECLARE_EVENT_CLASS_NOARGS(_name, _struct, _assign, _printk) \
++ DECLARE_EVENT_CLASS_NOARGS(_name, PARAMS(_struct), PARAMS(_assign),\
++ PARAMS(_printk))
++#define SC_DEFINE_EVENT_NOARGS(_template, _name) \
++ DEFINE_EVENT_NOARGS(_template, _name)
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM syscalls_integers
++#include "instrumentation/syscalls/headers/syscalls_integers.h"
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM syscalls_pointers
++#include "instrumentation/syscalls/headers/syscalls_pointers.h"
++#undef TRACE_SYSTEM
++#undef SC_TRACE_EVENT
++#undef SC_DECLARE_EVENT_CLASS_NOARGS
++#undef SC_DEFINE_EVENT_NOARGS
++
++#define TRACE_SYSTEM syscalls_unknown
++#include "instrumentation/syscalls/headers/syscalls_unknown.h"
++#undef TRACE_SYSTEM
++
++/* For compat syscalls */
++#undef _TRACE_SYSCALLS_integers_H
++#undef _TRACE_SYSCALLS_pointers_H
++
++/* Hijack probe callback for system calls */
++#undef TP_PROBE_CB
++#define TP_PROBE_CB(_template) &syscall_entry_probe
++#define SC_TRACE_EVENT(_name, _proto, _args, _struct, _assign, _printk) \
++ TRACE_EVENT(compat_##_name, PARAMS(_proto), PARAMS(_args), \
++ PARAMS(_struct), PARAMS(_assign), \
++ PARAMS(_printk))
++#define SC_DECLARE_EVENT_CLASS_NOARGS(_name, _struct, _assign, _printk) \
++ DECLARE_EVENT_CLASS_NOARGS(compat_##_name, PARAMS(_struct), \
++ PARAMS(_assign), PARAMS(_printk))
++#define SC_DEFINE_EVENT_NOARGS(_template, _name) \
++ DEFINE_EVENT_NOARGS(compat_##_template, compat_##_name)
++#define TRACE_SYSTEM compat_syscalls_integers
++#include "instrumentation/syscalls/headers/compat_syscalls_integers.h"
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM compat_syscalls_pointers
++#include "instrumentation/syscalls/headers/compat_syscalls_pointers.h"
++#undef TRACE_SYSTEM
++#undef SC_TRACE_EVENT
++#undef SC_DECLARE_EVENT_CLASS_NOARGS
++#undef SC_DEFINE_EVENT_NOARGS
++#undef TP_PROBE_CB
++
++#undef TP_MODULE_NOINIT
++#undef LTTNG_PACKAGE_BUILD
++#undef CREATE_TRACE_POINTS
++
++struct trace_syscall_entry {
++ void *func;
++ const struct lttng_event_desc *desc;
++ const struct lttng_event_field *fields;
++ unsigned int nrargs;
++};
++
++#define CREATE_SYSCALL_TABLE
++
++#undef TRACE_SYSCALL_TABLE
++#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
++ [ _nr ] = { \
++ .func = __event_probe__##_template, \
++ .nrargs = (_nrargs), \
++ .fields = __event_fields___##_template, \
++ .desc = &__event_desc___##_name, \
++ },
++
++static const struct trace_syscall_entry sc_table[] = {
++#include "instrumentation/syscalls/headers/syscalls_integers.h"
++#include "instrumentation/syscalls/headers/syscalls_pointers.h"
++};
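/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * TRACE_SYSCALL_TABLE() entries above populate sc_table[] indexed by
 * syscall number. For a hypothetical 3-argument syscall traced by the
 * template "sys_open" at slot __NR_open, the macro would expand to an
 * initializer of this shape:
 *
 *	[ __NR_open ] = {
 *		.func	= __event_probe__sys_open,
 *		.nrargs	= 3,
 *		.fields	= __event_fields___sys_open,
 *		.desc	= &__event_desc___sys_open,
 *	},
 *
 * which is what syscall_entry_probe() below uses to dispatch on the
 * syscall id.
 */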
++
++#undef TRACE_SYSCALL_TABLE
++#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
++ [ _nr ] = { \
++ .func = __event_probe__##compat_##_template, \
++ .nrargs = (_nrargs), \
++ .fields = __event_fields___##compat_##_template,\
++ .desc = &__event_desc___##compat_##_name, \
++ },
++
++/* Create compatibility syscall table */
++const struct trace_syscall_entry compat_sc_table[] = {
++#include "instrumentation/syscalls/headers/compat_syscalls_integers.h"
++#include "instrumentation/syscalls/headers/compat_syscalls_pointers.h"
++};
++
++#undef CREATE_SYSCALL_TABLE
++
++static void syscall_entry_unknown(struct lttng_event *event,
++ struct pt_regs *regs, unsigned int id)
++{
++ unsigned long args[UNKNOWN_SYSCALL_NRARGS];
++
++ syscall_get_arguments(current, regs, 0, UNKNOWN_SYSCALL_NRARGS, args);
++ if (unlikely(is_compat_task()))
++ __event_probe__compat_sys_unknown(event, id, args);
++ else
++ __event_probe__sys_unknown(event, id, args);
++}
++
++void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
++{
++ struct lttng_channel *chan = __data;
++ struct lttng_event *event, *unknown_event;
++ const struct trace_syscall_entry *table, *entry;
++ size_t table_len;
++
++ if (unlikely(is_compat_task())) {
++ table = compat_sc_table;
++ table_len = ARRAY_SIZE(compat_sc_table);
++ unknown_event = chan->sc_compat_unknown;
++ } else {
++ table = sc_table;
++ table_len = ARRAY_SIZE(sc_table);
++ unknown_event = chan->sc_unknown;
++ }
++ if (unlikely(id >= table_len)) {
++ syscall_entry_unknown(unknown_event, regs, id);
++ return;
++ }
++ if (unlikely(is_compat_task()))
++ event = chan->compat_sc_table[id];
++ else
++ event = chan->sc_table[id];
++ if (unlikely(!event)) {
++ syscall_entry_unknown(unknown_event, regs, id);
++ return;
++ }
++ entry = &table[id];
++ WARN_ON_ONCE(!entry);
++
++ switch (entry->nrargs) {
++ case 0:
++ {
++ void (*fptr)(void *__data) = entry->func;
++
++ fptr(event);
++ break;
++ }
++ case 1:
++ {
++ void (*fptr)(void *__data, unsigned long arg0) = entry->func;
++ unsigned long args[1];
++
++ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
++ fptr(event, args[0]);
++ break;
++ }
++ case 2:
++ {
++ void (*fptr)(void *__data,
++ unsigned long arg0,
++ unsigned long arg1) = entry->func;
++ unsigned long args[2];
++
++ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
++ fptr(event, args[0], args[1]);
++ break;
++ }
++ case 3:
++ {
++ void (*fptr)(void *__data,
++ unsigned long arg0,
++ unsigned long arg1,
++ unsigned long arg2) = entry->func;
++ unsigned long args[3];
++
++ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
++ fptr(event, args[0], args[1], args[2]);
++ break;
++ }
++ case 4:
++ {
++ void (*fptr)(void *__data,
++ unsigned long arg0,
++ unsigned long arg1,
++ unsigned long arg2,
++ unsigned long arg3) = entry->func;
++ unsigned long args[4];
++
++ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
++ fptr(event, args[0], args[1], args[2], args[3]);
++ break;
++ }
++ case 5:
++ {
++ void (*fptr)(void *__data,
++ unsigned long arg0,
++ unsigned long arg1,
++ unsigned long arg2,
++ unsigned long arg3,
++ unsigned long arg4) = entry->func;
++ unsigned long args[5];
++
++ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
++ fptr(event, args[0], args[1], args[2], args[3], args[4]);
++ break;
++ }
++ case 6:
++ {
++ void (*fptr)(void *__data,
++ unsigned long arg0,
++ unsigned long arg1,
++ unsigned long arg2,
++ unsigned long arg3,
++ unsigned long arg4,
++ unsigned long arg5) = entry->func;
++ unsigned long args[6];
++
++ syscall_get_arguments(current, regs, 0, entry->nrargs, args);
++ fptr(event, args[0], args[1], args[2],
++ args[3], args[4], args[5]);
++ break;
++ }
++ default:
++ break;
++ }
++}
++
++/* noinline to diminish caller stack size */
++static
++int fill_table(const struct trace_syscall_entry *table, size_t table_len,
++ struct lttng_event **chan_table, struct lttng_channel *chan, void *filter)
++{
++ const struct lttng_event_desc *desc;
++ unsigned int i;
++
++ /* Allocate events for each syscall, insert into table */
++ for (i = 0; i < table_len; i++) {
++ struct lttng_kernel_event ev;
++ desc = table[i].desc;
++
++ if (!desc) {
++ /* Unknown syscall */
++ continue;
++ }
++ /*
++		 * Skip entries already populated by a previous
++		 * failed registration attempt for this channel.
++ */
++ if (chan_table[i])
++ continue;
++ memset(&ev, 0, sizeof(ev));
++ strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
++ ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
++ ev.instrumentation = LTTNG_KERNEL_NOOP;
++ chan_table[i] = lttng_event_create(chan, &ev, filter,
++ desc);
++ if (!chan_table[i]) {
++ /*
++ * If something goes wrong in event registration
++ * after the first one, we have no choice but to
++ * leave the previous events in there, until
++ * deleted by session teardown.
++ */
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
++{
++ struct lttng_kernel_event ev;
++ int ret;
++
++ wrapper_vmalloc_sync_all();
++
++ if (!chan->sc_table) {
++ /* create syscall table mapping syscall to events */
++ chan->sc_table = kzalloc(sizeof(struct lttng_event *)
++ * ARRAY_SIZE(sc_table), GFP_KERNEL);
++ if (!chan->sc_table)
++ return -ENOMEM;
++ }
++
++#ifdef CONFIG_COMPAT
++ if (!chan->compat_sc_table) {
++ /* create syscall table mapping compat syscall to events */
++ chan->compat_sc_table = kzalloc(sizeof(struct lttng_event *)
++ * ARRAY_SIZE(compat_sc_table), GFP_KERNEL);
++ if (!chan->compat_sc_table)
++ return -ENOMEM;
++ }
++#endif
++ if (!chan->sc_unknown) {
++ const struct lttng_event_desc *desc =
++ &__event_desc___sys_unknown;
++
++ memset(&ev, 0, sizeof(ev));
++ strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
++ ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
++ ev.instrumentation = LTTNG_KERNEL_NOOP;
++ chan->sc_unknown = lttng_event_create(chan, &ev, filter,
++ desc);
++ if (!chan->sc_unknown) {
++ return -EINVAL;
++ }
++ }
++
++ if (!chan->sc_compat_unknown) {
++ const struct lttng_event_desc *desc =
++ &__event_desc___compat_sys_unknown;
++
++ memset(&ev, 0, sizeof(ev));
++ strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
++ ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
++ ev.instrumentation = LTTNG_KERNEL_NOOP;
++ chan->sc_compat_unknown = lttng_event_create(chan, &ev, filter,
++ desc);
++ if (!chan->sc_compat_unknown) {
++ return -EINVAL;
++ }
++ }
++
++ if (!chan->sc_exit) {
++ const struct lttng_event_desc *desc =
++ &__event_desc___exit_syscall;
++
++ memset(&ev, 0, sizeof(ev));
++ strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
++ ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
++ ev.instrumentation = LTTNG_KERNEL_NOOP;
++ chan->sc_exit = lttng_event_create(chan, &ev, filter,
++ desc);
++ if (!chan->sc_exit) {
++ return -EINVAL;
++ }
++ }
++
++ ret = fill_table(sc_table, ARRAY_SIZE(sc_table),
++ chan->sc_table, chan, filter);
++ if (ret)
++ return ret;
++#ifdef CONFIG_COMPAT
++ ret = fill_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
++ chan->compat_sc_table, chan, filter);
++ if (ret)
++ return ret;
++#endif
++ ret = kabi_2635_tracepoint_probe_register("sys_enter",
++ (void *) syscall_entry_probe, chan);
++ if (ret)
++ return ret;
++ /*
++	 * The sys_exit tracepoint is traced under the name "exit_syscall",
++	 * because "sys_exit" clashes with the sys_exit system call entry event.
++ */
++ ret = kabi_2635_tracepoint_probe_register("sys_exit",
++ (void *) __event_probe__exit_syscall,
++ chan->sc_exit);
++ if (ret) {
++ WARN_ON_ONCE(kabi_2635_tracepoint_probe_unregister("sys_enter",
++ (void *) syscall_entry_probe, chan));
++ }
++ return ret;
++}
++
++/*
++ * Only called at session destruction.
++ */
++int lttng_syscalls_unregister(struct lttng_channel *chan)
++{
++ int ret;
++
++ if (!chan->sc_table)
++ return 0;
++ ret = kabi_2635_tracepoint_probe_unregister("sys_exit",
++ (void *) __event_probe__exit_syscall,
++ chan->sc_exit);
++ if (ret)
++ return ret;
++ ret = kabi_2635_tracepoint_probe_unregister("sys_enter",
++ (void *) syscall_entry_probe, chan);
++ if (ret)
++ return ret;
++ /* lttng_event destroy will be performed by lttng_session_destroy() */
++ kfree(chan->sc_table);
++#ifdef CONFIG_COMPAT
++ kfree(chan->compat_sc_table);
++#endif
++ return 0;
++}
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-tracer-core.h
+@@ -0,0 +1,41 @@
++#ifndef LTTNG_TRACER_CORE_H
++#define LTTNG_TRACER_CORE_H
++
++/*
++ * lttng-tracer-core.h
++ *
++ * This contains the core definitions for the Linux Trace Toolkit Next
++ * Generation tracer.
++ *
++ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/list.h>
++#include <linux/percpu.h>
++
++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
++/* Align data on its natural alignment */
++#define RING_BUFFER_ALIGN
++#endif
++
++#include "wrapper/ringbuffer/config.h"
++
++struct lttng_session;
++struct lttng_channel;
++struct lttng_event;
++
++#endif /* LTTNG_TRACER_CORE_H */
+--- /dev/null
++++ b/drivers/staging/lttng/lttng-tracer.h
+@@ -0,0 +1,81 @@
++#ifndef _LTTNG_TRACER_H
++#define _LTTNG_TRACER_H
++
++/*
++ * lttng-tracer.h
++ *
++ * This contains the definitions for the Linux Trace Toolkit Next
++ * Generation tracer.
++ *
++ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <stdarg.h>
++#include <linux/types.h>
++#include <linux/limits.h>
++#include <linux/list.h>
++#include <linux/cache.h>
++#include <linux/timex.h>
++#include <linux/wait.h>
++#include <asm/atomic.h>
++#include <asm/local.h>
++
++#include "wrapper/trace-clock.h"
++#include "wrapper/compiler.h"
++#include "lttng-tracer-core.h"
++#include "lttng-events.h"
++
++#define LTTNG_MODULES_MAJOR_VERSION 2
++#define LTTNG_MODULES_MINOR_VERSION 3
++#define LTTNG_MODULES_PATCHLEVEL_VERSION 4
++
++#define LTTNG_VERSION_NAME "Dominus Vobiscum"
++#define LTTNG_VERSION_DESCRIPTION \
++ "A very succulent line-up of beers brewed at Microbrasserie Charlevoix. Elaborated starting from special malts and fermented with a Belgian yeast. These beers are refermented in bottle and will make you discover the richness of wheat, amber and triple styles."
++
++#ifndef CHAR_BIT
++#define CHAR_BIT 8
++#endif
++
++/* Number of bytes to log with a read/write event */
++#define LTTNG_LOG_RW_SIZE 32L
++#define LTTNG_MAX_SMALL_SIZE 0xFFFFU
++
++#ifdef RING_BUFFER_ALIGN
++#define lttng_alignof(type) __alignof__(type)
++#else
++#define lttng_alignof(type) 1
++#endif
++
++/* Tracer properties */
++#define CTF_MAGIC_NUMBER 0xC1FC1FC1
++#define TSDL_MAGIC_NUMBER 0x75D11D57
++
++/* CTF specification version followed */
++#define CTF_SPEC_MAJOR 1
++#define CTF_SPEC_MINOR 8
++
++/*
++ * Number of milliseconds to keep retrying metadata writes on buffer-full
++ * condition before giving up (10 seconds).
++ */
++#define LTTNG_METADATA_TIMEOUT_MSEC 10000
++
++#define LTTNG_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
++#define LTTNG_RFLAG_END (LTTNG_RFLAG_EXTENDED << 1)
++
++#endif /* _LTTNG_TRACER_H */
+--- /dev/null
++++ b/drivers/staging/lttng/probes/Makefile
+@@ -0,0 +1,240 @@
++#
++# Makefile for the LTT probes.
++# Only build from the package top-level directory; never run make on this file directly.
++
++ifneq ($(KERNELRELEASE),)
++ifneq ($(CONFIG_TRACEPOINTS),)
++
++ccflags-y += -I$(PWD)/probes
++obj-m += lttng-types.o
++
++obj-m += lttng-probe-sched.o
++obj-m += lttng-probe-irq.o
++obj-m += lttng-probe-timer.o
++obj-m += lttng-probe-kmem.o
++obj-m += lttng-probe-module.o
++obj-m += lttng-probe-power.o
++
++obj-m += lttng-probe-statedump.o
++
++ifneq ($(CONFIG_KVM),)
++obj-m += lttng-probe-kvm.o
++ifneq ($(CONFIG_X86),)
++kvm_dep = $(srctree)/virt/kvm/iodev.h
++ifneq ($(wildcard $(kvm_dep)),)
++CFLAGS_lttng-probe-kvm-x86.o += -I$(srctree)/virt/kvm
++CFLAGS_lttng-probe-kvm-x86-mmu.o += -I$(srctree)/virt/kvm
++obj-m += lttng-probe-kvm-x86.o
++obj-m += lttng-probe-kvm-x86-mmu.o
++else
++$(warning File $(kvm_dep) not found. The x86-specific "kvm" probes are disabled. Use the full kernel source tree to enable them.)
++endif
++endif
++endif
++
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 \
++ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 33 \) ] ; then \
++ echo "lttng-probe-signal.o" ; fi;)
++
++ifneq ($(CONFIG_BLOCK),)
++ifneq ($(CONFIG_EVENT_TRACING),) # need blk_cmd_buf_len
++obj-m += lttng-probe-block.o
++endif
++endif
++
++ifneq ($(CONFIG_NET),)
++obj-m += lttng-probe-napi.o
++obj-m += lttng-probe-skb.o
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 \
++ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 37 \) ] ; then \
++ echo "lttng-probe-net.o" ; fi;)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 1 ] ; then \
++ echo "lttng-probe-sock.o" ; fi;)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 1 ] ; then \
++ echo "lttng-probe-udp.o" ; fi;)
++endif
++
++ifneq ($(CONFIG_SND_SOC),)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 \
++ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 38 \) ] ; then \
++ echo "lttng-probe-asoc.o" ; fi;)
++endif
++
++ifneq ($(CONFIG_EXT3_FS),)
++ext3_dep = $(srctree)/fs/ext3/*.h
++ext3_dep_check = $(wildcard $(ext3_dep))
++ext3 = $(shell \
++ if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 1 ] ; then \
++ if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 4 -a \
++ -z "$(ext3_dep_check)" ] ; then \
++ echo "warn" ; \
++ exit ; \
++ fi; \
++ echo "lttng-probe-ext3.o" ; \
++ fi;)
++ifeq ($(ext3),warn)
++$(warning Files $(ext3_dep) not found. Probe "ext3" is disabled. Use the full kernel source tree to enable it.)
++ext3 =
++endif
++obj-m += $(ext3)
++endif
++
++ifneq ($(CONFIG_GPIOLIB),)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 ] ; then \
++ echo "lttng-probe-gpio.o" ; fi;)
++endif
++
++ifneq ($(CONFIG_JBD2),)
++obj-m += lttng-probe-jbd2.o
++endif
++
++ifneq ($(CONFIG_JBD),)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 1 ] ; then \
++ echo "lttng-probe-jbd.o" ; fi;)
++endif
++
++ifneq ($(CONFIG_REGULATOR),)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 \
++ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 38 \) ] ; then \
++ echo "lttng-probe-regulator.o" ; fi;)
++endif
++
++ifneq ($(CONFIG_SCSI),)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 \
++ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 35 \) ] ; then \
++ echo "lttng-probe-scsi.o" ; fi;)
++endif
++
++vmscan = $(shell \
++ if [ $(VERSION) -ge 3 \
++ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 36 \) ] ; then \
++ echo "lttng-probe-vmscan.o" ; fi;)
++ifneq ($(CONFIG_SWAP),)
++ obj-m += $(vmscan)
++else
++ifneq ($(CONFIG_CGROUP_MEM_RES_CTLR),)
++ obj-m += $(vmscan)
++endif
++endif
++
++# lock probe does not work, so disabling it for now
++#ifneq ($(CONFIG_LOCKDEP),)
++#obj-m += lttng-probe-lock.o
++#endif
++
++ifneq ($(CONFIG_BTRFS_FS),)
++btrfs_dep = $(srctree)/fs/btrfs/*.h
++btrfs = $(shell \
++ if [ $(VERSION) -ge 3 \
++ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 39 \) ] ; then \
++ echo "lttng-probe-btrfs.o" ; fi;)
++ifneq ($(btrfs),)
++ifeq ($(wildcard $(btrfs_dep)),)
++$(warning Files $(btrfs_dep) not found. Probe "btrfs" is disabled. Use the full kernel source tree to enable it.)
++btrfs =
++endif
++endif
++obj-m += $(btrfs)
++endif
++
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 \
++ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 38 \) ] ; then \
++ echo "lttng-probe-compaction.o" ; fi;)
++
++ifneq ($(CONFIG_EXT4_FS),)
++ext4_dep = $(srctree)/fs/ext4/*.h
++ext4 = lttng-probe-ext4.o
++ifeq ($(wildcard $(ext4_dep)),)
++$(warning Files $(ext4_dep) not found. Probe "ext4" is disabled. Use the full kernel source tree to enable it.)
++ext4 =
++endif
++obj-m += $(ext4)
++endif
++
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 4 ] ; then \
++ echo "lttng-probe-printk.o" ; fi;)
++ifneq ($(CONFIG_FRAME_WARN),0)
++CFLAGS_lttng-probe-printk.o += -Wframe-larger-than=2200
++endif
++
++obj-m += $(shell \
++ if [ \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -ge 6 \) \
++ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 5 -a $(SUBLEVEL) -ge 2 \) \
++ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 4 -a $(SUBLEVEL) -ge 9 \) \
++ -o \( $(VERSION) -eq 3 -a $(PATCHLEVEL) -eq 0 -a $(SUBLEVEL) -ge 41 \) ] ; then \
++ echo "lttng-probe-random.o" ; fi;)
++
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 2 ] ; then \
++ echo "lttng-probe-rcu.o" ; fi;)
++
++ifneq ($(CONFIG_REGMAP),)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 2 ] ; then \
++ echo "lttng-probe-regmap.o" ; fi;)
++endif
++
++ifneq ($(CONFIG_PM_RUNTIME),)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 2 ] ; then \
++ echo "lttng-probe-rpm.o" ; fi;)
++endif
++
++ifneq ($(CONFIG_SUNRPC),)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 -a $(PATCHLEVEL) -ge 4 ] ; then \
++ echo "lttng-probe-sunrpc.o" ; fi;)
++endif
++
++obj-m += lttng-probe-workqueue.o
++
++ifneq ($(CONFIG_KALLSYMS_ALL),)
++obj-m += $(shell \
++ if [ $(VERSION) -ge 3 \
++ -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 36 \) ] ; then \
++ echo "lttng-probe-writeback.o" ; fi;)
++endif
++
++
++ifneq ($(CONFIG_KPROBES),)
++obj-m += lttng-kprobes.o
++endif
++
++
++ifneq ($(CONFIG_KRETPROBES),)
++obj-m += lttng-kretprobes.o
++endif
++
++ifneq ($(CONFIG_DYNAMIC_FTRACE),)
++obj-m += lttng-ftrace.o
++endif
++
++endif
++
++else
++ KERNELDIR ?= /lib/modules/$(shell uname -r)/build
++ PWD := $(shell pwd)
++ CFLAGS = $(EXTCFLAGS)
++
++default:
++ $(MAKE) -C $(KERNELDIR) M=$(PWD) modules
++
++modules_install:
++ $(MAKE) -C $(KERNELDIR) M=$(PWD) modules_install
++ /sbin/depmod -a
++
++clean:
++ $(MAKE) -C $(KERNELDIR) M=$(PWD) clean
++
++endif
+--- /dev/null
++++ b/drivers/staging/lttng/probes/define_trace.h
+@@ -0,0 +1,180 @@
++/*
++ * define_trace.h
++ *
++ * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/*
++ * Trace files that want to automate creation of all tracepoints defined
++ * in their file should include this file. The following are macros that the
++ * trace file may define:
++ *
++ * TRACE_SYSTEM defines the system the tracepoint is for
++ *
++ * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h
++ * This macro may be defined to tell define_trace.h what file to include.
++ * Note, leave off the ".h".
++ *
++ * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace
++ * then this macro can define the path to use. Note, the path is relative to
++ * define_trace.h, not the file including it. Full path names for out of tree
++ * modules must be used.
++ */
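/*
 * Editor's note (illustrative sketch, not part of the patch): a probe
 * source that wants this header to create its tracepoints defines the
 * macros described above before including its instrumentation header,
 * as lttng-statedump-impl.c does earlier in this patch:
 *
 *	#define CREATE_TRACE_POINTS
 *	#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
 *	#define TRACE_INCLUDE_FILE lttng-statedump
 *	#include "instrumentation/events/lttng-module/lttng-statedump.h"
 */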
++
++#ifdef CREATE_TRACE_POINTS
++
++/* Prevent recursion */
++#undef CREATE_TRACE_POINTS
++
++#include <linux/stringify.h>
++/*
++ * module.h includes tracepoints, and because ftrace.h
++ * pulls in module.h:
++ * trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
++ * linux/ftrace.h -> linux/module.h
++ * we must include module.h here before we play with any of
++ * the TRACE_EVENT() macros, otherwise the tracepoints included
++ * by module.h may break the build.
++ */
++#include <linux/module.h>
++
++#undef TRACE_EVENT_MAP
++#define TRACE_EVENT_MAP(name, map, proto, args, tstruct, assign, print) \
++ DEFINE_TRACE(name)
++
++#undef TRACE_EVENT_CONDITION_MAP
++#define TRACE_EVENT_CONDITION_MAP(name, map, proto, args, cond, tstruct, assign, print) \
++ TRACE_EVENT(name, \
++ PARAMS(proto), \
++ PARAMS(args), \
++ PARAMS(tstruct), \
++ PARAMS(assign), \
++ PARAMS(print))
++
++#undef TRACE_EVENT_FN_MAP
++#define TRACE_EVENT_FN_MAP(name, map, proto, args, tstruct, \
++ assign, print, reg, unreg) \
++ DEFINE_TRACE_FN(name, reg, unreg)
++
++#undef DEFINE_EVENT_MAP
++#define DEFINE_EVENT_MAP(template, name, map, proto, args) \
++ DEFINE_TRACE(name)
++
++#undef DEFINE_EVENT_PRINT_MAP
++#define DEFINE_EVENT_PRINT_MAP(template, name, map, proto, args, print) \
++ DEFINE_TRACE(name)
++
++#undef DEFINE_EVENT_CONDITION_MAP
++#define DEFINE_EVENT_CONDITION_MAP(template, name, map, proto, args, cond) \
++ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
++
++
++#undef TRACE_EVENT
++#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
++ DEFINE_TRACE(name)
++
++#undef TRACE_EVENT_CONDITION
++#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
++ TRACE_EVENT(name, \
++ PARAMS(proto), \
++ PARAMS(args), \
++ PARAMS(tstruct), \
++ PARAMS(assign), \
++ PARAMS(print))
++
++#undef TRACE_EVENT_FN
++#define TRACE_EVENT_FN(name, proto, args, tstruct, \
++ assign, print, reg, unreg) \
++ DEFINE_TRACE_FN(name, reg, unreg)
++
++#undef DEFINE_EVENT
++#define DEFINE_EVENT(template, name, proto, args) \
++ DEFINE_TRACE(name)
++
++#undef DEFINE_EVENT_PRINT
++#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
++ DEFINE_TRACE(name)
++
++#undef DEFINE_EVENT_CONDITION
++#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \
++ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
++
++#undef DECLARE_TRACE
++#define DECLARE_TRACE(name, proto, args) \
++ DEFINE_TRACE(name)
++
++#undef TRACE_INCLUDE
++#undef __TRACE_INCLUDE
++
++#ifndef TRACE_INCLUDE_FILE
++# define TRACE_INCLUDE_FILE TRACE_SYSTEM
++# define UNDEF_TRACE_INCLUDE_FILE
++#endif
++
++#ifndef TRACE_INCLUDE_PATH
++# define __TRACE_INCLUDE(system) <trace/events/system.h>
++# define UNDEF_TRACE_INCLUDE_PATH
++#else
++# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h)
++#endif
++
++# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
++
++/* Let the trace headers be reread */
++#define TRACE_HEADER_MULTI_READ
++
++#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
++
++/* Make all open coded DECLARE_TRACE nops */
++#undef DECLARE_TRACE
++#define DECLARE_TRACE(name, proto, args)
++
++#ifdef LTTNG_PACKAGE_BUILD
++#include "lttng-events.h"
++#endif
++
++#undef TRACE_EVENT
++#undef TRACE_EVENT_FN
++#undef TRACE_EVENT_CONDITION
++#undef DEFINE_EVENT
++#undef DEFINE_EVENT_PRINT
++#undef DEFINE_EVENT_CONDITION
++#undef TRACE_EVENT_MAP
++#undef TRACE_EVENT_FN_MAP
++#undef TRACE_EVENT_CONDITION_MAP
++#undef DECLARE_EVENT_CLASS
++#undef DEFINE_EVENT_MAP
++#undef DEFINE_EVENT_PRINT_MAP
++#undef DEFINE_EVENT_CONDITION_MAP
++#undef TRACE_HEADER_MULTI_READ
++
++/* Only undef what we defined in this file */
++#ifdef UNDEF_TRACE_INCLUDE_FILE
++# undef TRACE_INCLUDE_FILE
++# undef UNDEF_TRACE_INCLUDE_FILE
++#endif
++
++#ifdef UNDEF_TRACE_INCLUDE_PATH
++# undef TRACE_INCLUDE_PATH
++# undef UNDEF_TRACE_INCLUDE_PATH
++#endif
++
++/* We may be processing more files */
++#define CREATE_TRACE_POINTS
++
++#endif /* CREATE_TRACE_POINTS */
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-events-reset.h
+@@ -0,0 +1,99 @@
++/*
++ * lttng-events-reset.h
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/* Reset macros used within TRACE_EVENT to "nothing" */
++
++#undef __field_full
++#define __field_full(_type, _item, _order, _base)
++
++#undef __array_enc_ext
++#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)
++
++#undef __dynamic_array_enc_ext
++#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)
++
++#undef __dynamic_array_enc_ext_2
++#define __dynamic_array_enc_ext_2(_type, _item, _length1, _length2, _order, _base, _encoding)
++
++#undef __dynamic_array_len
++#define __dynamic_array_len(_type, _item, _length)
++
++#undef __string
++#define __string(_item, _src)
++
++#undef tp_assign
++#define tp_assign(dest, src)
++
++#undef tp_memcpy
++#define tp_memcpy(dest, src, len)
++
++#undef tp_memcpy_dyn
++#define tp_memcpy_dyn(dest, src, len)
++
++#undef tp_strcpy
++#define tp_strcpy(dest, src)
++
++#undef __get_str
++#define __get_str(field)
++
++#undef __get_dynamic_array
++#define __get_dynamic_array(field)
++
++#undef __get_dynamic_array_len
++#define __get_dynamic_array_len(field)
++
++#undef TP_PROTO
++#define TP_PROTO(args...)
++
++#undef TP_ARGS
++#define TP_ARGS(args...)
++
++#undef TP_STRUCT__entry
++#define TP_STRUCT__entry(args...)
++
++#undef TP_fast_assign
++#define TP_fast_assign(args...)
++
++#undef __perf_count
++#define __perf_count(args...)
++
++#undef __perf_addr
++#define __perf_addr(args...)
++
++#undef TP_perf_assign
++#define TP_perf_assign(args...)
++
++#undef TP_printk
++#define TP_printk(args...)
++
++#undef DECLARE_EVENT_CLASS
++#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print)
++
++#undef DECLARE_EVENT_CLASS_NOARGS
++#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print)
++
++#undef DEFINE_EVENT_MAP
++#define DEFINE_EVENT_MAP(_template, _name, _map, _proto, _args)
++
++#undef DEFINE_EVENT_MAP_NOARGS
++#define DEFINE_EVENT_MAP_NOARGS(_template, _name, _map)
++
++#undef TRACE_EVENT_FLAGS
++#define TRACE_EVENT_FLAGS(name, value)
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-events.h
+@@ -0,0 +1,868 @@
++/*
++ * lttng-events.h
++ *
++ * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++#include <linux/uaccess.h>
++#include <linux/debugfs.h>
++#include "lttng.h"
++#include "lttng-types.h"
++#include "lttng-probe-user.h"
++#include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
++#include "../wrapper/ringbuffer/frontend_types.h"
++#include "../lttng-events.h"
++#include "../lttng-tracer-core.h"
++
++/*
++ * Macro declarations used for all stages.
++ */
++
++/*
++ * LTTng name mapping macros. LTTng remaps some of the kernel events to
++ * enforce name-spacing.
++ */
++#undef TRACE_EVENT_MAP
++#define TRACE_EVENT_MAP(name, map, proto, args, tstruct, assign, print) \
++ DECLARE_EVENT_CLASS(map, \
++ PARAMS(proto), \
++ PARAMS(args), \
++ PARAMS(tstruct), \
++ PARAMS(assign), \
++ PARAMS(print)) \
++ DEFINE_EVENT_MAP(map, name, map, PARAMS(proto), PARAMS(args))
++
++#undef TRACE_EVENT_MAP_NOARGS
++#define TRACE_EVENT_MAP_NOARGS(name, map, tstruct, assign, print) \
++ DECLARE_EVENT_CLASS_NOARGS(map, \
++ PARAMS(tstruct), \
++ PARAMS(assign), \
++ PARAMS(print)) \
++ DEFINE_EVENT_MAP_NOARGS(map, name, map)
++
++#undef DEFINE_EVENT_PRINT_MAP
++#define DEFINE_EVENT_PRINT_MAP(template, name, map, proto, args, print) \
++ DEFINE_EVENT_MAP(template, name, map, PARAMS(proto), PARAMS(args))
++
++/* Callbacks are meaningless to LTTng. */
++#undef TRACE_EVENT_FN_MAP
++#define TRACE_EVENT_FN_MAP(name, map, proto, args, tstruct, \
++ assign, print, reg, unreg) \
++ TRACE_EVENT_MAP(name, map, PARAMS(proto), PARAMS(args), \
++ PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
++
++#undef TRACE_EVENT_CONDITION_MAP
++#define TRACE_EVENT_CONDITION_MAP(name, map, proto, args, cond, tstruct, assign, print) \
++ TRACE_EVENT_MAP(name, map, \
++ PARAMS(proto), \
++ PARAMS(args), \
++ PARAMS(tstruct), \
++ PARAMS(assign), \
++ PARAMS(print))
++
++/*
++ * DECLARE_EVENT_CLASS can be used to add a generic function
++ * handler for events. That is, if all events have the same
++ * parameters and just have distinct trace points.
++ * Each tracepoint can be defined with DEFINE_EVENT and that
++ * will map the DECLARE_EVENT_CLASS to the tracepoint.
++ *
++ * TRACE_EVENT is a one to one mapping between tracepoint and template.
++ */
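/*
 * Editor's note (illustrative sketch with hypothetical event names, not
 * part of the patch): two events sharing one class versus the TRACE_EVENT
 * shorthand, using the field/assignment helpers handled by this header:
 *
 *	DECLARE_EVENT_CLASS(my_class,
 *		TP_PROTO(int foo),
 *		TP_ARGS(foo),
 *		TP_STRUCT__entry(__field(int, foo)),
 *		TP_fast_assign(tp_assign(foo, foo)),
 *		TP_printk("foo=%d", __entry->foo)
 *	)
 *	DEFINE_EVENT(my_class, my_event_a, TP_PROTO(int foo), TP_ARGS(foo))
 *	DEFINE_EVENT(my_class, my_event_b, TP_PROTO(int foo), TP_ARGS(foo))
 *
 * TRACE_EVENT(name, ...) below is simply a class plus a single
 * DEFINE_EVENT carrying the same name.
 */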
++
++#undef TRACE_EVENT
++#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
++ TRACE_EVENT_MAP(name, name, \
++ PARAMS(proto), \
++ PARAMS(args), \
++ PARAMS(tstruct), \
++ PARAMS(assign), \
++ PARAMS(print))
++
++#undef TRACE_EVENT_NOARGS
++#define TRACE_EVENT_NOARGS(name, tstruct, assign, print) \
++ TRACE_EVENT_MAP_NOARGS(name, name, \
++ PARAMS(tstruct), \
++ PARAMS(assign), \
++ PARAMS(print))
++
++#undef DEFINE_EVENT_PRINT
++#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
++ DEFINE_EVENT_PRINT_MAP(template, name, name, \
++			PARAMS(proto), PARAMS(args), PARAMS(print))
++
++#undef TRACE_EVENT_FN
++#define TRACE_EVENT_FN(name, proto, args, tstruct, \
++ assign, print, reg, unreg) \
++ TRACE_EVENT_FN_MAP(name, name, PARAMS(proto), PARAMS(args), \
++ PARAMS(tstruct), PARAMS(assign), PARAMS(print), \
++ PARAMS(reg), PARAMS(unreg)) \
++
++#undef DEFINE_EVENT
++#define DEFINE_EVENT(template, name, proto, args) \
++ DEFINE_EVENT_MAP(template, name, name, PARAMS(proto), PARAMS(args))
++
++#undef DEFINE_EVENT_NOARGS
++#define DEFINE_EVENT_NOARGS(template, name) \
++ DEFINE_EVENT_MAP_NOARGS(template, name, name)
++
++#undef TRACE_EVENT_CONDITION
++#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
++ TRACE_EVENT_CONDITION_MAP(name, name, \
++ PARAMS(proto), \
++ PARAMS(args), \
++ PARAMS(cond), \
++ PARAMS(tstruct), \
++ PARAMS(assign), \
++ PARAMS(print))
++
++/*
++ * Stage 1 of the trace events.
++ *
++ * Create dummy trace calls for each event, verifying that the LTTng module
++ * TRACE_EVENT headers match the kernel arguments. Will be optimized out by the
++ * compiler.
++ */
++
++#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
++
++#undef TP_PROTO
++#define TP_PROTO(args...) args
++
++#undef TP_ARGS
++#define TP_ARGS(args...) args
++
++#undef DEFINE_EVENT_MAP
++#define DEFINE_EVENT_MAP(_template, _name, _map, _proto, _args) \
++void trace_##_name(_proto);
++
++#undef DEFINE_EVENT_MAP_NOARGS
++#define DEFINE_EVENT_MAP_NOARGS(_template, _name, _map) \
++void trace_##_name(void *__data);
++
++#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
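/*
 * Editor's note (illustrative sketch with a hypothetical event, not part
 * of the patch): for DEFINE_EVENT_MAP(my_class, my_event, my_event,
 * TP_PROTO(int foo), TP_ARGS(foo)), Stage 1 only emits the prototype
 *
 *	void trace_my_event(int foo);
 *
 * so the compiler can check the LTTng copy of the event signature against
 * the kernel's own trace_my_event() declaration; no code is generated.
 */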
++
++/*
++ * Stage 2 of the trace events.
++ *
++ * Create event field type metadata section.
++ * Each event produces an array of fields.
++ */
++
++#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
++
++/* Named field types must be defined in lttng-types.h */
++
++#undef __field_full
++#define __field_full(_type, _item, _order, _base) \
++ { \
++ .name = #_item, \
++ .type = __type_integer(_type, _order, _base, none), \
++ },
++
++#undef __field
++#define __field(_type, _item) \
++ __field_full(_type, _item, __BYTE_ORDER, 10)
++
++#undef __field_ext
++#define __field_ext(_type, _item, _filter_type) \
++ __field(_type, _item)
++
++#undef __field_hex
++#define __field_hex(_type, _item) \
++ __field_full(_type, _item, __BYTE_ORDER, 16)
++
++#undef __field_network
++#define __field_network(_type, _item) \
++ __field_full(_type, _item, __BIG_ENDIAN, 10)
++
++#undef __field_network_hex
++#define __field_network_hex(_type, _item) \
++ __field_full(_type, _item, __BIG_ENDIAN, 16)
++
++#undef __array_enc_ext
++#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
++ { \
++ .name = #_item, \
++ .type = \
++ { \
++ .atype = atype_array, \
++ .u.array = \
++ { \
++ .length = _length, \
++ .elem_type = __type_integer(_type, _order, _base, _encoding), \
++ }, \
++ }, \
++ },
++
++#undef __array
++#define __array(_type, _item, _length) \
++ __array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, none)
++
++#undef __array_text
++#define __array_text(_type, _item, _length) \
++ __array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, UTF8)
++
++#undef __array_hex
++#define __array_hex(_type, _item, _length) \
++ __array_enc_ext(_type, _item, _length, __BYTE_ORDER, 16, none)
++
++#undef __dynamic_array_enc_ext
++#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
++ { \
++ .name = #_item, \
++ .type = \
++ { \
++ .atype = atype_sequence, \
++ .u.sequence = \
++ { \
++ .length_type = __type_integer(u32, __BYTE_ORDER, 10, none), \
++ .elem_type = __type_integer(_type, _order, _base, _encoding), \
++ }, \
++ }, \
++ },
++
++#undef __dynamic_array_enc_ext_2
++#define __dynamic_array_enc_ext_2(_type, _item, _length1, _length2, _order, _base, _encoding) \
++ __dynamic_array_enc_ext(_type, _item, _length1 + _length2, _order, _base, _encoding)
++
++#undef __dynamic_array
++#define __dynamic_array(_type, _item, _length) \
++ __dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, none)
++
++#undef __dynamic_array_text
++#define __dynamic_array_text(_type, _item, _length) \
++ __dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, UTF8)
++
++#undef __dynamic_array_hex
++#define __dynamic_array_hex(_type, _item, _length) \
++ __dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 16, none)
++
++#undef __dynamic_array_text_2
++#define __dynamic_array_text_2(_type, _item, _length1, _length2) \
++ __dynamic_array_enc_ext_2(_type, _item, _length1, _length2, __BYTE_ORDER, 10, UTF8)
++
++#undef __string
++#define __string(_item, _src) \
++ { \
++ .name = #_item, \
++ .type = \
++ { \
++ .atype = atype_string, \
++ .u.basic.string.encoding = lttng_encode_UTF8, \
++ }, \
++ },
++
++#undef __string_from_user
++#define __string_from_user(_item, _src) \
++ __string(_item, _src)
++
++#undef TP_STRUCT__entry
++#define TP_STRUCT__entry(args...) args /* Only one used in this phase */
++
++#undef DECLARE_EVENT_CLASS_NOARGS
++#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
++ static const struct lttng_event_field __event_fields___##_name[] = { \
++ _tstruct \
++ };
++
++#undef DECLARE_EVENT_CLASS
++#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
++ DECLARE_EVENT_CLASS_NOARGS(_name, PARAMS(_tstruct), PARAMS(_assign), \
++ PARAMS(_print))
++
++#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
++
++/*
++ * Stage 3 of the trace events.
++ *
++ * Create probe callback prototypes.
++ */
++
++#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
++
++#undef TP_PROTO
++#define TP_PROTO(args...) args
++
++#undef DECLARE_EVENT_CLASS
++#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
++static void __event_probe__##_name(void *__data, _proto);
++
++#undef DECLARE_EVENT_CLASS_NOARGS
++#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
++static void __event_probe__##_name(void *__data);
++
++#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
++
++/*
++ * Stage 3.9 of the trace events.
++ *
++ * Create event descriptions.
++ */
++
++/* Named field types must be defined in lttng-types.h */
++
++#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
++
++#ifndef TP_PROBE_CB
++#define TP_PROBE_CB(_template) &__event_probe__##_template
++#endif
++
++#undef DEFINE_EVENT_MAP_NOARGS
++#define DEFINE_EVENT_MAP_NOARGS(_template, _name, _map) \
++static const struct lttng_event_desc __event_desc___##_map = { \
++ .fields = __event_fields___##_template, \
++ .name = #_map, \
++ .probe_callback = (void *) TP_PROBE_CB(_template), \
++ .nr_fields = ARRAY_SIZE(__event_fields___##_template), \
++ .owner = THIS_MODULE, \
++};
++
++#undef DEFINE_EVENT_MAP
++#define DEFINE_EVENT_MAP(_template, _name, _map, _proto, _args) \
++ DEFINE_EVENT_MAP_NOARGS(_template, _name, _map)
++
++#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
++
++
++/*
++ * Stage 4 of the trace events.
++ *
++ * Create an array of event description pointers.
++ */
++
++/* Named field types must be defined in lttng-types.h */
++
++#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
++
++#undef DEFINE_EVENT_MAP_NOARGS
++#define DEFINE_EVENT_MAP_NOARGS(_template, _name, _map) \
++ &__event_desc___##_map,
++
++#undef DEFINE_EVENT_MAP
++#define DEFINE_EVENT_MAP(_template, _name, _map, _proto, _args) \
++ DEFINE_EVENT_MAP_NOARGS(_template, _name, _map)
++
++#define TP_ID1(_token, _system) _token##_system
++#define TP_ID(_token, _system) TP_ID1(_token, _system)
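++
++/*
++ * TP_ID() pastes a token with the TRACE_SYSTEM name after forcing one
++ * extra macro expansion pass. For a hypothetical TRACE_SYSTEM named
++ * "sample", TP_ID(__event_desc___, TRACE_SYSTEM) first expands
++ * TRACE_SYSTEM to sample, then TP_ID1() pastes the tokens, yielding the
++ * identifier __event_desc___sample.
++ */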
++
++static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
++#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
++};
++
++#undef TP_ID1
++#undef TP_ID
++
++
++/*
++ * Stage 5 of the trace events.
++ *
++ * Create a toplevel descriptor for the whole probe.
++ */
++
++#define TP_ID1(_token, _system) _token##_system
++#define TP_ID(_token, _system) TP_ID1(_token, _system)
++
++/* non-const because list head will be modified when registered. */
++static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
++ .event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
++ .nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
++};
++
++#undef TP_ID1
++#undef TP_ID
++
++/*
++ * Stage 6 of the trace events.
++ *
++ * Create static inline function that calculates event size.
++ */
++
++#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
++
++/* Named field types must be defined in lttng-types.h */
++
++#undef __field_full
++#define __field_full(_type, _item, _order, _base) \
++ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
++ __event_len += sizeof(_type);
++
++#undef __array_enc_ext
++#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
++ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
++ __event_len += sizeof(_type) * (_length);
++
++#undef __dynamic_array_enc_ext
++#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
++ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(u32)); \
++ __event_len += sizeof(u32); \
++ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
++ __dynamic_len[__dynamic_len_idx] = (_length); \
++ __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
++ __dynamic_len_idx++;
++
++#undef __dynamic_array_enc_ext_2
++#define __dynamic_array_enc_ext_2(_type, _item, _length1, _length2, _order, _base, _encoding)\
++ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(u32)); \
++ __event_len += sizeof(u32); \
++ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
++ __dynamic_len[__dynamic_len_idx] = (_length1); \
++ __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
++ __dynamic_len_idx++; \
++ __dynamic_len[__dynamic_len_idx] = (_length2); \
++ __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
++ __dynamic_len_idx++;
++
++#undef __string
++#define __string(_item, _src) \
++ __event_len += __dynamic_len[__dynamic_len_idx++] = strlen(_src) + 1;
++
++/*
++ * strlen_user includes the final \0. If it returns 0, it faulted, so we
++ * set the size to 1 (\0 only).
++ */
++#undef __string_from_user
++#define __string_from_user(_item, _src) \
++ __event_len += __dynamic_len[__dynamic_len_idx++] = \
++ max_t(size_t, lttng_strlen_user_inatomic(_src), 1);
++
++#undef TP_PROTO
++#define TP_PROTO(args...) args
++
++#undef TP_STRUCT__entry
++#define TP_STRUCT__entry(args...) args
++
++#undef DECLARE_EVENT_CLASS
++#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
++static inline size_t __event_get_size__##_name(size_t *__dynamic_len, _proto) \
++{ \
++ size_t __event_len = 0; \
++ unsigned int __dynamic_len_idx = 0; \
++ \
++ if (0) \
++ (void) __dynamic_len_idx; /* don't warn if unused */ \
++ _tstruct \
++ return __event_len; \
++}
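++
++/*
++ * Illustration (hypothetical event class "sample" with TP_PROTO(int foo,
++ * const char *msg), a __field(int, foo) and a __string(msg, msg)): the
++ * macros above would generate roughly
++ *
++ *   static inline size_t __event_get_size__sample(size_t *__dynamic_len,
++ *                   int foo, const char *msg)
++ *   {
++ *           size_t __event_len = 0;
++ *           unsigned int __dynamic_len_idx = 0;
++ *
++ *           __event_len += lib_ring_buffer_align(__event_len,
++ *                           lttng_alignof(int));
++ *           __event_len += sizeof(int);
++ *           __event_len += __dynamic_len[__dynamic_len_idx++] =
++ *                           strlen(msg) + 1;
++ *           return __event_len;
++ *   }
++ *
++ * All names are made up for illustration only.
++ */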
++
++#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
++
++/*
++ * Stage 7 of the trace events.
++ *
++ * Create static inline function that calculates event payload alignment.
++ */
++
++#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
++
++/* Named field types must be defined in lttng-types.h */
++
++#undef __field_full
++#define __field_full(_type, _item, _order, _base) \
++ __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
++
++#undef __array_enc_ext
++#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
++ __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
++
++#undef __dynamic_array_enc_ext
++#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
++ __event_align = max_t(size_t, __event_align, lttng_alignof(u32)); \
++ __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
++
++#undef __dynamic_array_enc_ext_2
++#define __dynamic_array_enc_ext_2(_type, _item, _length1, _length2, _order, _base, _encoding)\
++ __dynamic_array_enc_ext(_type, _item, _length1 + _length2, _order, _base, _encoding)
++
++#undef __string
++#define __string(_item, _src)
++
++#undef __string_from_user
++#define __string_from_user(_item, _src)
++
++#undef TP_PROTO
++#define TP_PROTO(args...) args
++
++#undef TP_STRUCT__entry
++#define TP_STRUCT__entry(args...) args
++
++#undef DECLARE_EVENT_CLASS
++#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
++static inline size_t __event_get_align__##_name(_proto) \
++{ \
++ size_t __event_align = 1; \
++ _tstruct \
++ return __event_align; \
++}
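++
++/*
++ * For the same hypothetical "sample" event class (an int field plus a
++ * string), this stage would generate roughly
++ *
++ *   static inline size_t __event_get_align__sample(int foo, const char *msg)
++ *   {
++ *           size_t __event_align = 1;
++ *
++ *           __event_align = max_t(size_t, __event_align, lttng_alignof(int));
++ *           return __event_align;
++ *   }
++ *
++ * Note that __string() expands to nothing here: strings add no alignment
++ * constraint.
++ */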
++
++#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
++
++
++/*
++ * Stage 8 of the trace events.
++ *
++ * Create structure declaration that allows the "assign" macros to access the
++ * field types.
++ */
++
++#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
++
++/* Named field types must be defined in lttng-types.h */
++
++#undef __field_full
++#define __field_full(_type, _item, _order, _base) _type _item;
++
++#undef __array_enc_ext
++#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
++ _type _item;
++
++#undef __dynamic_array_enc_ext
++#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
++ _type _item;
++
++#undef __dynamic_array_enc_ext_2
++#define __dynamic_array_enc_ext_2(_type, _item, _length1, _length2, _order, _base, _encoding)\
++ __dynamic_array_enc_ext(_type, _item, _length1 + _length2, _order, _base, _encoding)
++
++#undef __string
++#define __string(_item, _src) char _item;
++
++#undef __string_from_user
++#define __string_from_user(_item, _src) \
++ __string(_item, _src)
++
++#undef TP_STRUCT__entry
++#define TP_STRUCT__entry(args...) args
++
++#undef DECLARE_EVENT_CLASS
++#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
++struct __event_typemap__##_name { \
++ _tstruct \
++};
++
++#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
++
++
++/*
++ * Stage 9 of the trace events.
++ *
++ * Create the probe function: call the event size calculation and write
++ * event data into the buffer.
++ *
++ * We use both the field and assignment macros to write the fields in the order
++ * defined in the field declaration. The field declarations control the
++ * execution order, jumping to the appropriate assignment block.
++ */
++
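++/*
++ * Illustration (hypothetical __field(int, foo) assigned with
++ * tp_assign(foo, bar)): the field declaration expands, in the control
++ * section of the probe, to
++ *
++ *   goto __assign_foo;
++ *   __end_field_foo:
++ *
++ * while the corresponding tp_assign() expands, in the copy section that
++ * follows the probe's return statement, to
++ *
++ *   __assign_foo:
++ *   {
++ *           __typeof__(__typemap.foo) __tmp = (bar);
++ *           lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));
++ *           __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));
++ *   }
++ *   goto __end_field_foo;
++ *
++ * Execution thus bounces into the copy section and back for each field,
++ * writing data in field declaration order. "foo" and "bar" are made-up
++ * names for illustration.
++ */
++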
++#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
++
++#undef __field_full
++#define __field_full(_type, _item, _order, _base) \
++ goto __assign_##_item; \
++__end_field_##_item:
++
++#undef __array_enc_ext
++#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
++ goto __assign_##_item; \
++__end_field_##_item:
++
++#undef __dynamic_array_enc_ext
++#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
++ goto __assign_##_item##_1; \
++__end_field_##_item##_1: \
++ goto __assign_##_item##_2; \
++__end_field_##_item##_2:
++
++#undef __dynamic_array_enc_ext_2
++#define __dynamic_array_enc_ext_2(_type, _item, _length1, _length2, _order, _base, _encoding)\
++ goto __assign_##_item##_1; \
++__end_field_##_item##_1: \
++ goto __assign_##_item##_2; \
++__end_field_##_item##_2: \
++ goto __assign_##_item##_3; \
++__end_field_##_item##_3:
++
++#undef __string
++#define __string(_item, _src) \
++ goto __assign_##_item; \
++__end_field_##_item:
++
++#undef __string_from_user
++#define __string_from_user(_item, _src) \
++ __string(_item, _src)
++
++/*
++ * Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy() to
++ * strcpy().
++ */
++#undef tp_assign
++#define tp_assign(dest, src) \
++__assign_##dest: \
++ { \
++ __typeof__(__typemap.dest) __tmp = (src); \
++ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp)); \
++ __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
++ } \
++ goto __end_field_##dest;
++
++/* fixed length array memcpy */
++#undef tp_memcpy_gen
++#define tp_memcpy_gen(write_ops, dest, src, len) \
++__assign_##dest: \
++ if (0) \
++ (void) __typemap.dest; \
++ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest)); \
++ __chan->ops->write_ops(&__ctx, src, len); \
++ goto __end_field_##dest;
++
++#undef tp_memcpy
++#define tp_memcpy(dest, src, len) \
++ tp_memcpy_gen(event_write, dest, src, len)
++
++#undef tp_memcpy_from_user
++#define tp_memcpy_from_user(dest, src, len) \
++ tp_memcpy_gen(event_write_from_user, dest, src, len)
++
++/* variable length sequence memcpy */
++#undef tp_memcpy_dyn_gen
++#define tp_memcpy_dyn_gen(write_ops, dest, src) \
++__assign_##dest##_1: \
++ { \
++ u32 __tmpl = __dynamic_len[__dynamic_len_idx]; \
++ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(u32)); \
++ __chan->ops->event_write(&__ctx, &__tmpl, sizeof(u32)); \
++ } \
++ goto __end_field_##dest##_1; \
++__assign_##dest##_2: \
++ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest)); \
++ __chan->ops->write_ops(&__ctx, src, \
++ sizeof(__typemap.dest) * __get_dynamic_array_len(dest));\
++ goto __end_field_##dest##_2;
++
++#undef tp_memcpy_dyn_gen_2
++#define tp_memcpy_dyn_gen_2(write_ops, dest, src1, src2) \
++__assign_##dest##_1: \
++ { \
++ u32 __tmpl = __dynamic_len[__dynamic_len_idx] \
++ + __dynamic_len[__dynamic_len_idx + 1]; \
++ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(u32)); \
++ __chan->ops->event_write(&__ctx, &__tmpl, sizeof(u32)); \
++ } \
++ goto __end_field_##dest##_1; \
++__assign_##dest##_2: \
++ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest)); \
++ __chan->ops->write_ops(&__ctx, src1, \
++ sizeof(__typemap.dest) * __get_dynamic_array_len(dest));\
++ goto __end_field_##dest##_2; \
++__assign_##dest##_3: \
++ __chan->ops->write_ops(&__ctx, src2, \
++ sizeof(__typemap.dest) * __get_dynamic_array_len(dest));\
++ goto __end_field_##dest##_3;
++
++#undef tp_memcpy_dyn
++#define tp_memcpy_dyn(dest, src) \
++ tp_memcpy_dyn_gen(event_write, dest, src)
++
++#undef tp_memcpy_dyn_2
++#define tp_memcpy_dyn_2(dest, src1, src2) \
++ tp_memcpy_dyn_gen_2(event_write, dest, src1, src2)
++
++#undef tp_memcpy_dyn_from_user
++#define tp_memcpy_dyn_from_user(dest, src) \
++ tp_memcpy_dyn_gen(event_write_from_user, dest, src)
++
++/*
++ * The string length including the final \0.
++ */
++#undef tp_copy_string_from_user
++#define tp_copy_string_from_user(dest, src) \
++ __assign_##dest: \
++ { \
++ size_t __ustrlen; \
++ \
++ if (0) \
++ (void) __typemap.dest; \
++ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest));\
++ __ustrlen = __get_dynamic_array_len(dest); \
++ if (likely(__ustrlen > 1)) { \
++ __chan->ops->event_write_from_user(&__ctx, src, \
++ __ustrlen - 1); \
++ } \
++ __chan->ops->event_memset(&__ctx, 0, 1); \
++ } \
++ goto __end_field_##dest;
++
++#undef tp_strcpy
++#define tp_strcpy(dest, src) \
++ tp_memcpy(dest, src, __get_dynamic_array_len(dest))
++
++/* Named field types must be defined in lttng-types.h */
++
++#undef __get_str
++#define __get_str(field) field
++
++#undef __get_dynamic_array
++#define __get_dynamic_array(field) field
++
++/* Beware: this length getter actually consumes the length value */
++#undef __get_dynamic_array_len
++#define __get_dynamic_array_len(field) __dynamic_len[__dynamic_len_idx++]
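++
++/*
++ * Each use advances __dynamic_len_idx, so __get_dynamic_array_len() must
++ * be called once per length entry recorded by the size calculation stage
++ * (twice for the *_2 variants), in field declaration order, to stay in
++ * sync with __dynamic_len[].
++ */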
++
++#undef TP_PROTO
++#define TP_PROTO(args...) args
++
++#undef TP_ARGS
++#define TP_ARGS(args...) args
++
++#undef TP_STRUCT__entry
++#define TP_STRUCT__entry(args...) args
++
++#undef TP_fast_assign
++#define TP_fast_assign(args...) args
++
++/*
++ * For state dump, check that the "session" argument (mandatory) matches the
++ * session this event belongs to. This ensures that we write state dump data
++ * only into the started session, not into all sessions.
++ */
++#ifdef TP_SESSION_CHECK
++#define _TP_SESSION_CHECK(session, csession) (session == csession)
++#else /* TP_SESSION_CHECK */
++#define _TP_SESSION_CHECK(session, csession) 1
++#endif /* TP_SESSION_CHECK */
++
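++/*
++ * When TP_SESSION_CHECK is defined, the generated probe records the event
++ * only if its "session" argument is the session being dumped; otherwise
++ * the check expands to a constant 1 and the branch is optimized away.
++ */
++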
++/*
++ * __dynamic_len array length is twice the number of fields due to
++ * __dynamic_array_enc_ext_2() and tp_memcpy_dyn_2(), which are the
++ * worst case, needing 2 entries per field.
++ */
++#undef DECLARE_EVENT_CLASS
++#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
++static void __event_probe__##_name(void *__data, _proto) \
++{ \
++ struct lttng_event *__event = __data; \
++ struct lttng_channel *__chan = __event->chan; \
++ struct lib_ring_buffer_ctx __ctx; \
++ size_t __event_len, __event_align; \
++ size_t __dynamic_len_idx = 0; \
++ size_t __dynamic_len[2 * ARRAY_SIZE(__event_fields___##_name)]; \
++ struct __event_typemap__##_name __typemap; \
++ int __ret; \
++ \
++ if (0) { \
++ (void) __dynamic_len_idx; /* don't warn if unused */ \
++ (void) __typemap; /* don't warn if unused */ \
++ } \
++ if (!_TP_SESSION_CHECK(session, __chan->session)) \
++ return; \
++ if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
++ return; \
++ if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
++ return; \
++ if (unlikely(!ACCESS_ONCE(__event->enabled))) \
++ return; \
++ __event_len = __event_get_size__##_name(__dynamic_len, _args); \
++ __event_align = __event_get_align__##_name(_args); \
++ lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
++ __event_align, -1); \
++ __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
++ if (__ret < 0) \
++ return; \
++ /* Control code (field ordering) */ \
++ _tstruct \
++ __chan->ops->event_commit(&__ctx); \
++ return; \
++ /* Copy code, steered by control code */ \
++ _assign \
++}
++
++#undef DECLARE_EVENT_CLASS_NOARGS
++#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
++static void __event_probe__##_name(void *__data) \
++{ \
++ struct lttng_event *__event = __data; \
++ struct lttng_channel *__chan = __event->chan; \
++ struct lib_ring_buffer_ctx __ctx; \
++ size_t __event_len, __event_align; \
++ int __ret; \
++ \
++ if (!_TP_SESSION_CHECK(session, __chan->session)) \
++ return; \
++ if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
++ return; \
++ if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
++ return; \
++ if (unlikely(!ACCESS_ONCE(__event->enabled))) \
++ return; \
++ __event_len = 0; \
++ __event_align = 1; \
++ lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
++ __event_align, -1); \
++ __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
++ if (__ret < 0) \
++ return; \
++ /* Control code (field ordering) */ \
++ _tstruct \
++ __chan->ops->event_commit(&__ctx); \
++ return; \
++ /* Copy code, steered by control code */ \
++ _assign \
++}
++
++#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
++
++/*
++ * Stage 10 of the trace events.
++ *
++ * Register/unregister probes at module load/unload.
++ */
++
++#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
++
++#define TP_ID1(_token, _system) _token##_system
++#define TP_ID(_token, _system) TP_ID1(_token, _system)
++#define module_init_eval1(_token, _system) module_init(_token##_system)
++#define module_init_eval(_token, _system) module_init_eval1(_token, _system)
++#define module_exit_eval1(_token, _system) module_exit(_token##_system)
++#define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
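++
++/*
++ * module_init_eval()/module_exit_eval() force TRACE_SYSTEM to expand
++ * before token pasting. For a hypothetical TRACE_SYSTEM named "sample",
++ * module_init_eval(__lttng_events_init__, TRACE_SYSTEM) therefore
++ * becomes module_init(__lttng_events_init__sample).
++ */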
++
++#ifndef TP_MODULE_NOINIT
++static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
++{
++ wrapper_vmalloc_sync_all();
++ return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
++}
++
++static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
++{
++ lttng_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
++}
++
++#ifndef TP_MODULE_NOAUTOLOAD
++module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
++module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
++#endif
++
++#endif
++
++#undef module_init_eval
++#undef module_exit_eval
++#undef TP_ID1
++#undef TP_ID
++
++#undef TP_PROTO
++#undef TP_ARGS
++#undef TRACE_EVENT_FLAGS
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-ftrace.c
+@@ -0,0 +1,201 @@
++/*
++ * probes/lttng-ftrace.c
++ *
++ * LTTng function tracer integration module.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/*
++ * The ftrace function tracer does not seem to provide synchronization between
++ * probe teardown and callback execution. Therefore, we make this module
++ * permanently loaded (it cannot be unloaded).
++ *
++ * TODO: Move to register_ftrace_function() (which is exported for
++ * modules) for Linux >= 3.0. It is faster (only enables the selected
++ * functions), and will stay there.
++ */
++
++#include <linux/module.h>
++#include <linux/ftrace.h>
++#include <linux/slab.h>
++#include "../lttng-events.h"
++#include "../wrapper/ringbuffer/frontend_types.h"
++#include "../wrapper/ftrace.h"
++#include "../wrapper/vmalloc.h"
++#include "../lttng-tracer.h"
++
++static
++void lttng_ftrace_handler(unsigned long ip, unsigned long parent_ip, void **data)
++{
++ struct lttng_event *event = *data;
++ struct lttng_channel *chan = event->chan;
++ struct lib_ring_buffer_ctx ctx;
++ struct {
++ unsigned long ip;
++ unsigned long parent_ip;
++ } payload;
++ int ret;
++
++ if (unlikely(!ACCESS_ONCE(chan->session->active)))
++ return;
++ if (unlikely(!ACCESS_ONCE(chan->enabled)))
++ return;
++ if (unlikely(!ACCESS_ONCE(event->enabled)))
++ return;
++
++ lib_ring_buffer_ctx_init(&ctx, chan->chan, event,
++ sizeof(payload), lttng_alignof(payload), -1);
++ ret = chan->ops->event_reserve(&ctx, event->id);
++ if (ret < 0)
++ return;
++ payload.ip = ip;
++ payload.parent_ip = parent_ip;
++ lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
++ chan->ops->event_write(&ctx, &payload, sizeof(payload));
++ chan->ops->event_commit(&ctx);
++ return;
++}
++
++/*
++ * Create event description
++ */
++static
++int lttng_create_ftrace_event(const char *name, struct lttng_event *event)
++{
++ struct lttng_event_field *fields;
++ struct lttng_event_desc *desc;
++ int ret;
++
++ desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
++ if (!desc)
++ return -ENOMEM;
++ desc->name = kstrdup(name, GFP_KERNEL);
++ if (!desc->name) {
++ ret = -ENOMEM;
++ goto error_str;
++ }
++ desc->nr_fields = 2;
++ desc->fields = fields =
++ kzalloc(2 * sizeof(struct lttng_event_field), GFP_KERNEL);
++ if (!desc->fields) {
++ ret = -ENOMEM;
++ goto error_fields;
++ }
++ fields[0].name = "ip";
++ fields[0].type.atype = atype_integer;
++ fields[0].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
++ fields[0].type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
++ fields[0].type.u.basic.integer.signedness = lttng_is_signed_type(unsigned long);
++ fields[0].type.u.basic.integer.reverse_byte_order = 0;
++ fields[0].type.u.basic.integer.base = 16;
++ fields[0].type.u.basic.integer.encoding = lttng_encode_none;
++
++ fields[1].name = "parent_ip";
++ fields[1].type.atype = atype_integer;
++ fields[1].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
++ fields[1].type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
++ fields[1].type.u.basic.integer.signedness = lttng_is_signed_type(unsigned long);
++ fields[1].type.u.basic.integer.reverse_byte_order = 0;
++ fields[1].type.u.basic.integer.base = 16;
++ fields[1].type.u.basic.integer.encoding = lttng_encode_none;
++
++ desc->owner = THIS_MODULE;
++ event->desc = desc;
++
++ return 0;
++
++error_fields:
++ kfree(desc->name);
++error_str:
++ kfree(desc);
++ return ret;
++}
++
++static
++struct ftrace_probe_ops lttng_ftrace_ops = {
++ .func = lttng_ftrace_handler,
++};
++
++int lttng_ftrace_register(const char *name,
++ const char *symbol_name,
++ struct lttng_event *event)
++{
++ int ret;
++
++ ret = lttng_create_ftrace_event(name, event);
++ if (ret)
++ goto error;
++
++ event->u.ftrace.symbol_name = kstrdup(symbol_name, GFP_KERNEL);
++	if (!event->u.ftrace.symbol_name) {
++		ret = -ENOMEM;
++		goto name_error;
++	}
++
++	/* Ensure the memory we just allocated doesn't trigger page faults */
++ wrapper_vmalloc_sync_all();
++
++ ret = wrapper_register_ftrace_function_probe(event->u.ftrace.symbol_name,
++ &lttng_ftrace_ops, event);
++ if (ret < 0)
++ goto register_error;
++ return 0;
++
++register_error:
++ kfree(event->u.ftrace.symbol_name);
++name_error:
++ kfree(event->desc->name);
++ kfree(event->desc);
++error:
++ return ret;
++}
++EXPORT_SYMBOL_GPL(lttng_ftrace_register);
++
++void lttng_ftrace_unregister(struct lttng_event *event)
++{
++ wrapper_unregister_ftrace_function_probe(event->u.ftrace.symbol_name,
++ &lttng_ftrace_ops, event);
++}
++EXPORT_SYMBOL_GPL(lttng_ftrace_unregister);
++
++void lttng_ftrace_destroy_private(struct lttng_event *event)
++{
++ kfree(event->u.ftrace.symbol_name);
++ kfree(event->desc->fields);
++ kfree(event->desc->name);
++ kfree(event->desc);
++}
++EXPORT_SYMBOL_GPL(lttng_ftrace_destroy_private);
++
++int lttng_ftrace_init(void)
++{
++ wrapper_vmalloc_sync_all();
++ return 0;
++}
++module_init(lttng_ftrace_init)
++
++/*
++ * Ftrace takes care of waiting for a grace period (RCU sched) at probe
++ * unregistration, and disables preemption around the probe call.
++ */
++void lttng_ftrace_exit(void)
++{
++}
++module_exit(lttng_ftrace_exit)
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit Ftrace Support");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-kprobes.c
+@@ -0,0 +1,177 @@
++/*
++ * probes/lttng-kprobes.c
++ *
++ * LTTng kprobes integration module.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/kprobes.h>
++#include <linux/slab.h>
++#include "../lttng-events.h"
++#include "../wrapper/ringbuffer/frontend_types.h"
++#include "../wrapper/vmalloc.h"
++#include "../lttng-tracer.h"
++
++static
++int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
++{
++ struct lttng_event *event =
++ container_of(p, struct lttng_event, u.kprobe.kp);
++ struct lttng_channel *chan = event->chan;
++ struct lib_ring_buffer_ctx ctx;
++ int ret;
++ unsigned long data = (unsigned long) p->addr;
++
++ if (unlikely(!ACCESS_ONCE(chan->session->active)))
++ return 0;
++ if (unlikely(!ACCESS_ONCE(chan->enabled)))
++ return 0;
++ if (unlikely(!ACCESS_ONCE(event->enabled)))
++ return 0;
++
++ lib_ring_buffer_ctx_init(&ctx, chan->chan, event, sizeof(data),
++ lttng_alignof(data), -1);
++ ret = chan->ops->event_reserve(&ctx, event->id);
++ if (ret < 0)
++ return 0;
++ lib_ring_buffer_align_ctx(&ctx, lttng_alignof(data));
++ chan->ops->event_write(&ctx, &data, sizeof(data));
++ chan->ops->event_commit(&ctx);
++ return 0;
++}
++
++/*
++ * Create event description
++ */
++static
++int lttng_create_kprobe_event(const char *name, struct lttng_event *event)
++{
++ struct lttng_event_field *field;
++ struct lttng_event_desc *desc;
++ int ret;
++
++ desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
++ if (!desc)
++ return -ENOMEM;
++ desc->name = kstrdup(name, GFP_KERNEL);
++ if (!desc->name) {
++ ret = -ENOMEM;
++ goto error_str;
++ }
++ desc->nr_fields = 1;
++ desc->fields = field =
++ kzalloc(1 * sizeof(struct lttng_event_field), GFP_KERNEL);
++ if (!field) {
++ ret = -ENOMEM;
++ goto error_field;
++ }
++ field->name = "ip";
++ field->type.atype = atype_integer;
++ field->type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
++ field->type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
++ field->type.u.basic.integer.signedness = lttng_is_signed_type(unsigned long);
++ field->type.u.basic.integer.reverse_byte_order = 0;
++ field->type.u.basic.integer.base = 16;
++ field->type.u.basic.integer.encoding = lttng_encode_none;
++ desc->owner = THIS_MODULE;
++ event->desc = desc;
++
++ return 0;
++
++error_field:
++ kfree(desc->name);
++error_str:
++ kfree(desc);
++ return ret;
++}
++
++int lttng_kprobes_register(const char *name,
++ const char *symbol_name,
++ uint64_t offset,
++ uint64_t addr,
++ struct lttng_event *event)
++{
++ int ret;
++
++ /* Kprobes expects a NULL symbol name if unused */
++ if (symbol_name[0] == '\0')
++ symbol_name = NULL;
++
++ ret = lttng_create_kprobe_event(name, event);
++ if (ret)
++ goto error;
++ memset(&event->u.kprobe.kp, 0, sizeof(event->u.kprobe.kp));
++ event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
++ if (symbol_name) {
++ event->u.kprobe.symbol_name =
++ kzalloc(LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char),
++ GFP_KERNEL);
++ if (!event->u.kprobe.symbol_name) {
++ ret = -ENOMEM;
++ goto name_error;
++ }
++ memcpy(event->u.kprobe.symbol_name, symbol_name,
++ LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char));
++ event->u.kprobe.kp.symbol_name =
++ event->u.kprobe.symbol_name;
++ }
++ event->u.kprobe.kp.offset = offset;
++ event->u.kprobe.kp.addr = (void *) (unsigned long) addr;
++
++ /*
++	 * Ensure the memory we just allocated doesn't trigger page faults.
++	 * Well, kprobes itself puts the page fault handler on the blacklist,
++ * but we can never be too careful.
++ */
++ wrapper_vmalloc_sync_all();
++
++ ret = register_kprobe(&event->u.kprobe.kp);
++ if (ret)
++ goto register_error;
++ return 0;
++
++register_error:
++ kfree(event->u.kprobe.symbol_name);
++name_error:
++ kfree(event->desc->fields);
++ kfree(event->desc->name);
++ kfree(event->desc);
++error:
++ return ret;
++}
++EXPORT_SYMBOL_GPL(lttng_kprobes_register);
++
++void lttng_kprobes_unregister(struct lttng_event *event)
++{
++ unregister_kprobe(&event->u.kprobe.kp);
++}
++EXPORT_SYMBOL_GPL(lttng_kprobes_unregister);
++
++void lttng_kprobes_destroy_private(struct lttng_event *event)
++{
++ kfree(event->u.kprobe.symbol_name);
++ kfree(event->desc->fields);
++ kfree(event->desc->name);
++ kfree(event->desc);
++}
++EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_private);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit Kprobes Support");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-kretprobes.c
+@@ -0,0 +1,290 @@
++/*
++ * probes/lttng-kretprobes.c
++ *
++ * LTTng kretprobes integration module.
++ *
++ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/kprobes.h>
++#include <linux/slab.h>
++#include <linux/kref.h>
++#include "../lttng-events.h"
++#include "../wrapper/ringbuffer/frontend_types.h"
++#include "../wrapper/vmalloc.h"
++#include "../lttng-tracer.h"
++
++enum lttng_kretprobe_type {
++ EVENT_ENTRY = 0,
++ EVENT_RETURN = 1,
++};
++
++struct lttng_krp {
++ struct kretprobe krp;
++ struct lttng_event *event[2]; /* ENTRY and RETURN */
++ struct kref kref_register;
++ struct kref kref_alloc;
++};
++
++static
++int _lttng_kretprobes_handler(struct kretprobe_instance *krpi,
++ struct pt_regs *regs,
++ enum lttng_kretprobe_type type)
++{
++ struct lttng_krp *lttng_krp =
++ container_of(krpi->rp, struct lttng_krp, krp);
++ struct lttng_event *event =
++ lttng_krp->event[type];
++ struct lttng_channel *chan = event->chan;
++ struct lib_ring_buffer_ctx ctx;
++ int ret;
++ struct {
++ unsigned long ip;
++ unsigned long parent_ip;
++ } payload;
++
++ if (unlikely(!ACCESS_ONCE(chan->session->active)))
++ return 0;
++ if (unlikely(!ACCESS_ONCE(chan->enabled)))
++ return 0;
++ if (unlikely(!ACCESS_ONCE(event->enabled)))
++ return 0;
++
++ payload.ip = (unsigned long) krpi->rp->kp.addr;
++ payload.parent_ip = (unsigned long) krpi->ret_addr;
++
++ lib_ring_buffer_ctx_init(&ctx, chan->chan, event, sizeof(payload),
++ lttng_alignof(payload), -1);
++ ret = chan->ops->event_reserve(&ctx, event->id);
++ if (ret < 0)
++ return 0;
++ lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
++ chan->ops->event_write(&ctx, &payload, sizeof(payload));
++ chan->ops->event_commit(&ctx);
++ return 0;
++}
++
++static
++int lttng_kretprobes_handler_entry(struct kretprobe_instance *krpi,
++ struct pt_regs *regs)
++{
++ return _lttng_kretprobes_handler(krpi, regs, EVENT_ENTRY);
++}
++
++static
++int lttng_kretprobes_handler_return(struct kretprobe_instance *krpi,
++ struct pt_regs *regs)
++{
++ return _lttng_kretprobes_handler(krpi, regs, EVENT_RETURN);
++}
++
++/*
++ * Create event description
++ */
++static
++int lttng_create_kprobe_event(const char *name, struct lttng_event *event,
++ enum lttng_kretprobe_type type)
++{
++ struct lttng_event_field *fields;
++ struct lttng_event_desc *desc;
++ int ret;
++ char *alloc_name;
++ size_t name_len;
++ const char *suffix = NULL;
++
++ desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
++ if (!desc)
++ return -ENOMEM;
++ name_len = strlen(name);
++ switch (type) {
++ case EVENT_ENTRY:
++ suffix = "_entry";
++ break;
++ case EVENT_RETURN:
++ suffix = "_return";
++ break;
++ }
++ name_len += strlen(suffix);
++ alloc_name = kmalloc(name_len + 1, GFP_KERNEL);
++ if (!alloc_name) {
++ ret = -ENOMEM;
++ goto error_str;
++ }
++ strcpy(alloc_name, name);
++ strcat(alloc_name, suffix);
++ desc->name = alloc_name;
++ desc->nr_fields = 2;
++ desc->fields = fields =
++ kzalloc(2 * sizeof(struct lttng_event_field), GFP_KERNEL);
++ if (!desc->fields) {
++ ret = -ENOMEM;
++ goto error_fields;
++ }
++ fields[0].name = "ip";
++ fields[0].type.atype = atype_integer;
++ fields[0].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
++ fields[0].type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
++ fields[0].type.u.basic.integer.signedness = lttng_is_signed_type(unsigned long);
++ fields[0].type.u.basic.integer.reverse_byte_order = 0;
++ fields[0].type.u.basic.integer.base = 16;
++ fields[0].type.u.basic.integer.encoding = lttng_encode_none;
++
++ fields[1].name = "parent_ip";
++ fields[1].type.atype = atype_integer;
++ fields[1].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
++ fields[1].type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
++ fields[1].type.u.basic.integer.signedness = lttng_is_signed_type(unsigned long);
++ fields[1].type.u.basic.integer.reverse_byte_order = 0;
++ fields[1].type.u.basic.integer.base = 16;
++ fields[1].type.u.basic.integer.encoding = lttng_encode_none;
++
++ desc->owner = THIS_MODULE;
++ event->desc = desc;
++
++ return 0;
++
++error_fields:
++ kfree(desc->name);
++error_str:
++ kfree(desc);
++ return ret;
++}
++
++int lttng_kretprobes_register(const char *name,
++ const char *symbol_name,
++ uint64_t offset,
++ uint64_t addr,
++ struct lttng_event *event_entry,
++ struct lttng_event *event_return)
++{
++ int ret;
++ struct lttng_krp *lttng_krp;
++
++ /* Kprobes expects a NULL symbol name if unused */
++ if (symbol_name[0] == '\0')
++ symbol_name = NULL;
++
++ ret = lttng_create_kprobe_event(name, event_entry, EVENT_ENTRY);
++ if (ret)
++ goto error;
++ ret = lttng_create_kprobe_event(name, event_return, EVENT_RETURN);
++ if (ret)
++ goto event_return_error;
++ lttng_krp = kzalloc(sizeof(*lttng_krp), GFP_KERNEL);
++	if (!lttng_krp) {
++		ret = -ENOMEM;
++		goto krp_error;
++	}
++ lttng_krp->krp.entry_handler = lttng_kretprobes_handler_entry;
++ lttng_krp->krp.handler = lttng_kretprobes_handler_return;
++ if (symbol_name) {
++ char *alloc_symbol;
++
++ alloc_symbol = kstrdup(symbol_name, GFP_KERNEL);
++ if (!alloc_symbol) {
++ ret = -ENOMEM;
++ goto name_error;
++ }
++ lttng_krp->krp.kp.symbol_name =
++ alloc_symbol;
++ event_entry->u.kretprobe.symbol_name =
++ alloc_symbol;
++ event_return->u.kretprobe.symbol_name =
++ alloc_symbol;
++ }
++ lttng_krp->krp.kp.offset = offset;
++ lttng_krp->krp.kp.addr = (void *) (unsigned long) addr;
++
++ /* Allow probe handler to find event structures */
++ lttng_krp->event[EVENT_ENTRY] = event_entry;
++ lttng_krp->event[EVENT_RETURN] = event_return;
++ event_entry->u.kretprobe.lttng_krp = lttng_krp;
++ event_return->u.kretprobe.lttng_krp = lttng_krp;
++
++ /*
++ * Both events must be unregistered before the kretprobe is
++ * unregistered. Same for memory allocation.
++ */
++ kref_init(&lttng_krp->kref_alloc);
++ kref_get(&lttng_krp->kref_alloc); /* inc refcount to 2 */
++ kref_init(&lttng_krp->kref_register);
++ kref_get(&lttng_krp->kref_register); /* inc refcount to 2 */
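++
++	/*
++	 * Each refcount therefore starts at 2: one reference per event
++	 * (entry and return). The second kref_put() on kref_register (one
++	 * per event, from lttng_kretprobes_unregister()) performs the
++	 * actual unregister_kretprobe(); the second kref_put() on
++	 * kref_alloc (from lttng_kretprobes_destroy_private()) releases
++	 * the shared symbol name.
++	 */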
++
++ /*
++	 * Ensure the memory we just allocated doesn't trigger page faults.
++	 * Well, kprobes itself puts the page fault handler on the blacklist,
++ * but we can never be too careful.
++ */
++ wrapper_vmalloc_sync_all();
++
++ ret = register_kretprobe(&lttng_krp->krp);
++ if (ret)
++ goto register_error;
++ return 0;
++
++register_error:
++ kfree(lttng_krp->krp.kp.symbol_name);
++name_error:
++ kfree(lttng_krp);
++krp_error:
++ kfree(event_return->desc->fields);
++ kfree(event_return->desc->name);
++ kfree(event_return->desc);
++event_return_error:
++ kfree(event_entry->desc->fields);
++ kfree(event_entry->desc->name);
++ kfree(event_entry->desc);
++error:
++ return ret;
++}
++EXPORT_SYMBOL_GPL(lttng_kretprobes_register);
++
++static
++void _lttng_kretprobes_unregister_release(struct kref *kref)
++{
++ struct lttng_krp *lttng_krp =
++ container_of(kref, struct lttng_krp, kref_register);
++ unregister_kretprobe(&lttng_krp->krp);
++}
++
++void lttng_kretprobes_unregister(struct lttng_event *event)
++{
++ kref_put(&event->u.kretprobe.lttng_krp->kref_register,
++ _lttng_kretprobes_unregister_release);
++}
++EXPORT_SYMBOL_GPL(lttng_kretprobes_unregister);
++
++static
++void _lttng_kretprobes_release(struct kref *kref)
++{
++ struct lttng_krp *lttng_krp =
++ container_of(kref, struct lttng_krp, kref_alloc);
++ kfree(lttng_krp->krp.kp.symbol_name);
++}
++
++void lttng_kretprobes_destroy_private(struct lttng_event *event)
++{
++ kfree(event->desc->fields);
++ kfree(event->desc->name);
++ kfree(event->desc);
++ kref_put(&event->u.kretprobe.lttng_krp->kref_alloc,
++ _lttng_kretprobes_release);
++}
++EXPORT_SYMBOL_GPL(lttng_kretprobes_destroy_private);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers");
++MODULE_DESCRIPTION("Linux Trace Toolkit Kretprobes Support");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-asoc.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-asoc.c
++ *
++ * LTTng asoc probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <sound/jack.h>
++#include <sound/soc.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/asoc.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
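++
++/*
++ * With LTTNG_PACKAGE_BUILD and CREATE_TRACE_POINTS defined, including the
++ * lttng-module version of the instrumentation header below expands it
++ * through the staged trace event macros shown earlier in this patch,
++ * generating the event field descriptions, probe callbacks, event
++ * descriptors and module init/exit code for this probe module.
++ */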
++
++#include "../instrumentation/events/lttng-module/asoc.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>, Paul Woegerer <paul_woegerer@mentor.com>, and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng asoc probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-block.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-block.c
++ *
++ * LTTng block probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/blktrace_api.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/block.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/block.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng block probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-btrfs.c
+@@ -0,0 +1,48 @@
++/*
++ * probes/lttng-probe-btrfs.c
++ *
++ * LTTng btrfs probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <../fs/btrfs/ctree.h>
++#include <../fs/btrfs/transaction.h>
++#include <../fs/btrfs/volumes.h>
++#include <linux/dcache.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/btrfs.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/btrfs.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng btrfs probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-compaction.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-compaction.c
++ *
++ * LTTng compaction probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/compaction.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/compaction.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng compaction probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-ext3.c
+@@ -0,0 +1,52 @@
++/*
++ * probes/lttng-probe-ext3.c
++ *
++ * LTTng ext3 probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/dcache.h>
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
++#include <../fs/ext3/ext3.h>
++#else
++#include <linux/ext3_fs_i.h>
++#endif
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/ext3.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/ext3.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>, Paul Woegerer <paul_woegerer@mentor.com>, and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng ext3 probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-ext4.c
+@@ -0,0 +1,51 @@
++/*
++ * probes/lttng-probe-ext4.c
++ *
++ * LTTng ext4 probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <../fs/ext4/ext4.h>
++#include <../fs/ext4/mballoc.h>
++#include <../fs/ext4/ext4_extents.h>
++#include <linux/dcache.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/ext4.h>
++
++#include "../lttng-kernel-version.h"
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/ext4.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng ext4 probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-gpio.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-gpio.c
++ *
++ * LTTng gpio probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/gpio.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/gpio.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
++MODULE_DESCRIPTION("LTTng gpio probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-irq.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-irq.c
++ *
++ * LTTng irq probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/interrupt.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/irq.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/irq.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng irq probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-jbd.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-jbd.c
++ *
++ * LTTng jbd probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/jbd.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/jbd.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>, Paul Woegerer <paul_woegerer@mentor.com>, and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng jbd probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-jbd2.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-jbd2.c
++ *
++ * LTTng jbd2 probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/jbd2.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/jbd2.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com> and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng jbd2 probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-kmem.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-kmem.c
++ *
++ * LTTng kmem probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/kmem.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/kmem.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com> and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng kmem probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-kvm-x86-mmu.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-kvm-x86-mmu.c
++ *
++ * LTTng kvm probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/kvm_host.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
++#include "../instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng kvm mmu probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-kvm-x86.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-kvm-x86.c
++ *
++ * LTTng kvm probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/kvm_host.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/kvm.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
++#include "../instrumentation/events/lttng-module/arch/x86/kvm/trace.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng kvm probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-kvm.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-kvm.c
++ *
++ * LTTng kvm probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/kvm_host.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/kvm.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/kvm.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng kvm probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-lock.c
+@@ -0,0 +1,50 @@
++/*
++ * probes/lttng-probe-lock.c
++ *
++ * LTTng lock probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/version.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
++#include <trace/events/lock.h>
++#else
++#include <trace/events/lockdep.h>
++#endif
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/lock.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com> and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng lock probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-module.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-module.c
++ *
++ * LTTng module probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/module.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/module.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com> and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng module probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-napi.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-napi.c
++ *
++ * LTTng napi probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/napi.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/napi.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
++MODULE_DESCRIPTION("LTTng napi probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-net.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-net.c
++ *
++ * LTTng net probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/net.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/net.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
++MODULE_DESCRIPTION("LTTng net probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-power.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-power.c
++ *
++ * LTTng power probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/power.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/power.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com> and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng power probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-printk.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-printk.c
++ *
++ * LTTng printk probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/printk.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/printk.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng printk probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-random.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-random.c
++ *
++ * LTTng random probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/random.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/random.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng random probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-rcu.c
+@@ -0,0 +1,44 @@
++/*
++ * probes/lttng-probe-rcu.c
++ *
++ * LTTng rcu probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/rcupdate.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/rcu.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/rcu.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng rcu probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-regmap.c
+@@ -0,0 +1,44 @@
++/*
++ * probes/lttng-probe-regmap.c
++ *
++ * LTTng regmap probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/regmap.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/regmap.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng regmap probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-regulator.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-regulator.c
++ *
++ * LTTng regulator probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/regulator.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/regulator.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
++MODULE_DESCRIPTION("LTTng regulator probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-rpm.c
+@@ -0,0 +1,44 @@
++/*
++ * probes/lttng-probe-rpm.c
++ *
++ * LTTng rpm probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/rpm.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/rpm.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng rpm probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-sched.c
+@@ -0,0 +1,44 @@
++/*
++ * probes/lttng-probe-sched.c
++ *
++ * LTTng sched probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/sched.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/sched.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng sched probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-scsi.c
+@@ -0,0 +1,44 @@
++/*
++ * probes/lttng-probe-scsi.c
++ *
++ * LTTng scsi probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <scsi/scsi_device.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/scsi.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/scsi.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com> and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng scsi probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-signal.c
+@@ -0,0 +1,42 @@
++/*
++ * probes/lttng-probe-signal.c
++ *
++ * LTTng signal probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/signal.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/signal.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng signal probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-skb.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-skb.c
++ *
++ * LTTng skb probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/skb.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/skb.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com> and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng skb probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-sock.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-sock.c
++ *
++ * LTTng sock probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/sock.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/sock.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
++MODULE_DESCRIPTION("LTTng sock probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-statedump.c
+@@ -0,0 +1,46 @@
++/*
++ * probes/lttng-probe-statedump.c
++ *
++ * LTTng statedump probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/netlink.h>
++#include <linux/inet.h>
++#include <linux/ip.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/sched.h>
++#include "../lttng-events.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TP_SESSION_CHECK
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++#define TRACE_INCLUDE_FILE lttng-statedump
++
++#include "../instrumentation/events/lttng-module/lttng-statedump.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng statedump probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-sunrpc.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-sunrpc.c
++ *
++ * LTTng sunrpc probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/sunrpc.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/sunrpc.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng sunrpc probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-timer.c
+@@ -0,0 +1,46 @@
++/*
++ * probes/lttng-probe-timer.c
++ *
++ * LTTng timer probes.
++ *
++ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++
++#include <linux/sched.h>
++#include <trace/events/timer.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/timer.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng timer probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-udp.c
+@@ -0,0 +1,43 @@
++/*
++ * probes/lttng-probe-udp.c
++ *
++ * LTTng udp probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/udp.h>
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/udp.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>");
++MODULE_DESCRIPTION("LTTng udp probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-user.c
+@@ -0,0 +1,54 @@
++/*
++ * lttng-probe-user.c
++ *
++ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/uaccess.h>
++#include "lttng-probe-user.h"
++
++/*
++ * Calculate string length. The count includes the final null terminating
++ * character if one is readable, and otherwise stops at the first fault.
++ * Disabling page faults ensures that we can safely call this from pretty
++ * much any context, including those where the caller holds mmap_sem, or
++ * any lock which nests in mmap_sem.
++ */
++long lttng_strlen_user_inatomic(const char *addr)
++{
++ long count = 0;
++ mm_segment_t old_fs = get_fs();
++
++ set_fs(KERNEL_DS);
++ pagefault_disable();
++ for (;;) {
++ char v;
++ unsigned long ret;
++
++ ret = __copy_from_user_inatomic(&v,
++ (__force const char __user *)(addr),
++ sizeof(v));
++ if (unlikely(ret > 0))
++ break;
++ count++;
++ if (unlikely(!v))
++ break;
++ addr++;
++ }
++ pagefault_enable();
++ set_fs(old_fs);
++ return count;
++}
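A hedged usage sketch (hypothetical helper, not part of the patch): because page faults are disabled inside the helper, a probe can size a user-space string from atomic tracing context before reserving space for it in an event.

	#include <linux/types.h>
	#include "lttng-probe-user.h"

	/* Hypothetical caller: size a user-space string from probe context.
	 * The returned count already includes the NUL when it was readable and
	 * is truncated at the first faulting byte otherwise, so it can be used
	 * directly as the number of bytes to reserve for the string field. */
	static size_t user_string_reserve_len(const char *user_addr)
	{
		return (size_t)lttng_strlen_user_inatomic(user_addr);
	}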
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-user.h
+@@ -0,0 +1,30 @@
++#ifndef _LTTNG_PROBE_USER_H
++#define _LTTNG_PROBE_USER_H
++
++/*
++ * lttng-probe-user.h
++ *
++ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/*
++ * Calculate string length. The count includes the final null terminating
++ * character if one is readable, and otherwise stops at the first fault.
++ */
++long lttng_strlen_user_inatomic(const char *addr);
++
++#endif /* _LTTNG_PROBE_USER_H */
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-vmscan.c
+@@ -0,0 +1,45 @@
++/*
++ * probes/lttng-probe-vmscan.c
++ *
++ * LTTng vmscan probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/vmscan.h>
++
++#include "../lttng-kernel-version.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/vmscan.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Wade Farnsworth <wade_farnsworth@mentor.com>, Paul Woegerer <paul_woegerer@mentor.com>, and Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng vmscan probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-workqueue.c
+@@ -0,0 +1,49 @@
++/*
++ * probes/lttng-probe-workqueue.c
++ *
++ * LTTng workqueue probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/idr.h>
++
++struct cpu_workqueue_struct;
++struct pool_workqueue;
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/workqueue.h>
++
++#include "../wrapper/tracepoint.h"
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/workqueue.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng workqueue probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-probe-writeback.c
+@@ -0,0 +1,54 @@
++/*
++ * probes/lttng-probe-writeback.c
++ *
++ * LTTng writeback probes.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ * Copyright (C) 2012 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/mm.h>
++
++/*
++ * Create the tracepoint static inlines from the kernel to validate that our
++ * trace event macros match the kernel we run on.
++ */
++#include <trace/events/writeback.h>
++
++#include "../lttng-kernel-version.h"
++#include "../wrapper/writeback.h"
++
++/* #if <check version number if global_dirty_limit will be exported> */
++
++#define global_dirty_limit wrapper_global_dirty_limit()
++
++/* #endif <check version number> */
++
++/*
++ * Create LTTng tracepoint probes.
++ */
++#define LTTNG_PACKAGE_BUILD
++#define CREATE_TRACE_POINTS
++#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
++
++#include "../instrumentation/events/lttng-module/writeback.h"
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Andrew Gabbasov <andrew_gabbasov@mentor.com>");
++MODULE_DESCRIPTION("LTTng writeback probes");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-type-list.h
+@@ -0,0 +1,33 @@
++/*
++ * lttng-type-list.h
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/* Type list, used to create metadata */
++
++/* Enumerations */
++TRACE_EVENT_ENUM(hrtimer_mode,
++ V(HRTIMER_MODE_ABS),
++ V(HRTIMER_MODE_REL),
++ V(HRTIMER_MODE_PINNED),
++ V(HRTIMER_MODE_ABS_PINNED),
++ V(HRTIMER_MODE_REL_PINNED),
++ R(HRTIMER_MODE_UNDEFINED, 0x04, 0x20), /* Example (to remove) */
++)
++
++TRACE_EVENT_TYPE(hrtimer_mode, enum, unsigned char)
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-types.c
+@@ -0,0 +1,61 @@
++/*
++ * probes/lttng-types.c
++ *
++ * LTTng types.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
++#include "../lttng-events.h"
++#include "lttng-types.h"
++#include <linux/hrtimer.h>
++
++#define STAGE_EXPORT_ENUMS
++#include "lttng-types.h"
++#include "lttng-type-list.h"
++#undef STAGE_EXPORT_ENUMS
++
++struct lttng_enum lttng_enums[] = {
++#define STAGE_EXPORT_TYPES
++#include "lttng-types.h"
++#include "lttng-type-list.h"
++#undef STAGE_EXPORT_TYPES
++};
++
++static int lttng_types_init(void)
++{
++ int ret = 0;
++
++ wrapper_vmalloc_sync_all();
++ /* TODO */
++ return ret;
++}
++
++module_init(lttng_types_init);
++
++static void lttng_types_exit(void)
++{
++}
++
++module_exit(lttng_types_exit);
++
++MODULE_LICENSE("GPL and additional rights");
++MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
++MODULE_DESCRIPTION("LTTng types");
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng-types.h
+@@ -0,0 +1,84 @@
++/*
++ * Protect against multiple inclusion of structure declarations, but run the
++ * stages below each time.
++ */
++#ifndef _LTTNG_PROBES_LTTNG_TYPES_H
++#define _LTTNG_PROBES_LTTNG_TYPES_H
++
++/*
++ * probes/lttng-types.h
++ *
++ * LTTng types.
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/seq_file.h>
++#include "lttng.h"
++#include "../lttng-events.h"
++#include "../lttng-tracer.h"
++#include "../lttng-endian.h"
++
++#endif /* _LTTNG_PROBES_LTTNG_TYPES_H */
++
++/* Export enumerations */
++
++#ifdef STAGE_EXPORT_ENUMS
++
++#undef TRACE_EVENT_TYPE
++#define TRACE_EVENT_TYPE(_name, _abstract_type, args...)
++
++#undef TRACE_EVENT_ENUM
++#define TRACE_EVENT_ENUM(_name, _entries...) \
++ const struct lttng_enum_entry __trace_event_enum_##_name[] = { \
++ PARAMS(_entries) \
++ };
++
++/* Enumeration entry (single value) */
++#undef V
++#define V(_string) { _string, _string, #_string}
++
++/* Enumeration entry (range) */
++#undef R
++#define R(_string, _range_start, _range_end) \
++ { _range_start, _range_end, #_string }
++
++#endif /* STAGE_EXPORT_ENUMS */
++
++
++/* Export named types */
++
++#ifdef STAGE_EXPORT_TYPES
++
++#undef TRACE_EVENT_TYPE___enum
++#define TRACE_EVENT_TYPE___enum(_name, _container_type) \
++ { \
++ .name = #_name, \
++ .container_type = __type_integer(_container_type, __BYTE_ORDER, 10, none), \
++ .entries = __trace_event_enum_##_name, \
++ .len = ARRAY_SIZE(__trace_event_enum_##_name), \
++ },
++
++/* Local declaration */
++#undef TRACE_EVENT_TYPE
++#define TRACE_EVENT_TYPE(_name, _abstract_type, args...) \
++ TRACE_EVENT_TYPE___##_abstract_type(_name, args)
++
++#undef TRACE_EVENT_ENUM
++#define TRACE_EVENT_ENUM(_name, _entries...)
++
++#endif /* STAGE_EXPORT_TYPES */
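Roughly, when probes/lttng-types.c includes this header together with lttng-type-list.h once per stage, the hrtimer_mode declaration expands as sketched below (a paraphrase reconstructed from the V()/R() and TRACE_EVENT_TYPE___enum macros above; abbreviated, not literal preprocessor output):

	/* Stage 1, STAGE_EXPORT_ENUMS: V() and R() become entry initializers. */
	const struct lttng_enum_entry __trace_event_enum_hrtimer_mode[] = {
		{ HRTIMER_MODE_ABS, HRTIMER_MODE_ABS, "HRTIMER_MODE_ABS" },
		/* ... one such line per V() value ... */
		{ 0x04, 0x20, "HRTIMER_MODE_UNDEFINED" },	/* the R() range entry */
	};

	/* Stage 2, STAGE_EXPORT_TYPES: the same TRACE_EVENT_TYPE() line now
	 * emits a descriptor inside the lttng_enums[] array of lttng-types.c:
	 *
	 *	{
	 *		.name = "hrtimer_mode",
	 *		.container_type = __type_integer(unsigned char, __BYTE_ORDER, 10, none),
	 *		.entries = __trace_event_enum_hrtimer_mode,
	 *		.len = ARRAY_SIZE(__trace_event_enum_hrtimer_mode),
	 *	},
	 */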
+--- /dev/null
++++ b/drivers/staging/lttng/probes/lttng.h
+@@ -0,0 +1,27 @@
++#ifndef _LTTNG_PROBES_LTTNG_H
++#define _LTTNG_PROBES_LTTNG_H
++
++/*
++ * lttng.h
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#undef PARAMS
++#define PARAMS(args...) args
++
++#endif /* _LTTNG_PROBES_LTTNG_H */
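PARAMS() is the usual preprocessor idiom for passing an argument that itself contains commas through a single macro parameter; TRACE_EVENT_ENUM() in lttng-types.h relies on it for its _entries list. A standalone illustration (hypothetical macros, with PARAMS restated so the snippet compiles on its own):

	#define PARAMS(args...)			args
	#define DECLARE_TABLE(name, entries)	int name[] = { entries }

	/* The commas in the entry list are protected by the parentheses of the
	 * PARAMS(...) call, so DECLARE_TABLE() still sees exactly two arguments;
	 * PARAMS() then expands back to the bare comma-separated list. */
	DECLARE_TABLE(example_table, PARAMS(1, 2, 3));
	/* -> int example_table[] = { 1, 2, 3 }; */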
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/compiler.h
+@@ -0,0 +1,42 @@
++#ifndef _LTTNG_WRAPPER_COMPILER_H
++#define _LTTNG_WRAPPER_COMPILER_H
++
++/*
++ * wrapper/compiler.h
++ *
++ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/compiler.h>
++
++/*
++ * Don't allow compiling with a known-buggy compiler.
++ */
++
++#ifdef GCC_VERSION
++
++/*
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
++ */
++# ifdef __ARMEL__
++# if GCC_VERSION >= 40800 && GCC_VERSION <= 40802
++# error Your gcc version produces clobbered frame accesses
++# endif
++# endif
++#endif
++
++#endif /* _LTTNG_WRAPPER_COMPILER_H */
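For reference, GCC_VERSION comes from the kernel's <linux/compiler-gcc.h> on kernels recent enough to define it, encoded as major * 10000 + minor * 100 + patchlevel, so the range check above covers gcc 4.8.0 through 4.8.2. An equivalent fallback definition would look like the following (illustrative; when the kernel does not define GCC_VERSION, the check above is simply skipped):

	#ifndef GCC_VERSION
	# define GCC_VERSION (__GNUC__ * 10000		\
			      + __GNUC_MINOR__ * 100	\
			      + __GNUC_PATCHLEVEL__)
	#endif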
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/fdtable.c
+@@ -0,0 +1,57 @@
++/*
++ * wrapper/fdtable.c
++ *
++ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/version.h>
++#include <linux/spinlock.h>
++#include "fdtable.h"
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
++
++/*
++ * Reimplementation of iterate_fd() for kernels between 2.6.32 and 3.6
++ * (inclusive).
++ */
++int lttng_iterate_fd(struct files_struct *files,
++ unsigned int first,
++ int (*cb)(const void *, struct file *, unsigned int),
++ const void *ctx)
++{
++ struct fdtable *fdt;
++ struct file *filp;
++ unsigned int i;
++ int res = 0;
++
++ if (!files)
++ return 0;
++ spin_lock(&files->file_lock);
++ fdt = files_fdtable(files);
++ for (i = 0; i < fdt->max_fds; i++) {
++ filp = fcheck_files(files, i);
++ if (!filp)
++ continue;
++ res = cb(ctx, filp, i);
++ if (res)
++ break;
++ }
++ spin_unlock(&files->file_lock);
++ return res;
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/fdtable.h
+@@ -0,0 +1,44 @@
++#ifndef _LTTNG_WRAPPER_FDTABLE_H
++#define _LTTNG_WRAPPER_FDTABLE_H
++
++/*
++ * wrapper/fdtable.h
++ *
++ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/version.h>
++#include <linux/fdtable.h>
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0))
++
++int lttng_iterate_fd(struct files_struct *files,
++ unsigned int first,
++ int (*cb)(const void *, struct file *, unsigned int),
++ const void *ctx);
++
++#else
++
++/*
++ * iterate_fd() appeared at commit
++ * c3c073f808b22dfae15ef8412b6f7b998644139a in the Linux kernel (first
++ * released kernel: v3.7).
++ */
++#define lttng_iterate_fd iterate_fd
++
++#endif
++#endif /* _LTTNG_WRAPPER_FDTABLE_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ftrace.h
+@@ -0,0 +1,84 @@
++#ifndef _LTTNG_WRAPPER_FTRACE_H
++#define _LTTNG_WRAPPER_FTRACE_H
++
++/*
++ * wrapper/ftrace.h
++ *
++ * wrapper around register_ftrace_function_probe. Using KALLSYMS to get its
++ * address when available, else we need to have a kernel that exports this
++ * function to GPL modules.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/ftrace.h>
++
++#ifdef CONFIG_KALLSYMS
++
++#include <linux/kallsyms.h>
++#include "kallsyms.h"
++
++static inline
++int wrapper_register_ftrace_function_probe(char *glob,
++ struct ftrace_probe_ops *ops, void *data)
++{
++ int (*register_ftrace_function_probe_sym)(char *glob,
++ struct ftrace_probe_ops *ops, void *data);
++
++ register_ftrace_function_probe_sym = (void *) kallsyms_lookup_funcptr("register_ftrace_function_probe");
++ if (register_ftrace_function_probe_sym) {
++ return register_ftrace_function_probe_sym(glob, ops, data);
++ } else {
++ printk(KERN_WARNING "LTTng: register_ftrace_function_probe symbol lookup failed.\n");
++ return -EINVAL;
++ }
++}
++
++static inline
++void wrapper_unregister_ftrace_function_probe(char *glob,
++ struct ftrace_probe_ops *ops, void *data)
++{
++ void (*unregister_ftrace_function_probe_sym)(char *glob,
++ struct ftrace_probe_ops *ops, void *data);
++
++ unregister_ftrace_function_probe_sym = (void *) kallsyms_lookup_funcptr("unregister_ftrace_function_probe");
++ if (unregister_ftrace_function_probe_sym) {
++ unregister_ftrace_function_probe_sym(glob, ops, data);
++ } else {
++ printk(KERN_WARNING "LTTng: unregister_ftrace_function_probe symbol lookup failed.\n");
++ WARN_ON(1);
++ }
++}
++
++#else
++
++static inline
++int wrapper_register_ftrace_function_probe(char *glob,
++ struct ftrace_probe_ops *ops, void *data)
++{
++ return register_ftrace_function_probe(glob, ops, data);
++}
++
++static inline
++void wrapper_unregister_ftrace_function_probe(char *glob,
++ struct ftrace_probe_ops *ops, void *data)
++{
++ return unregister_ftrace_function_probe(glob, ops, data);
++}
++#endif
++
++#endif /* _LTTNG_WRAPPER_FTRACE_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/inline_memcpy.h
+@@ -0,0 +1,23 @@
++/*
++ * wrapper/inline_memcpy.h
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#if !defined(__HAVE_ARCH_INLINE_MEMCPY) && !defined(inline_memcpy)
++#define inline_memcpy memcpy
++#endif
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/irq.h
+@@ -0,0 +1,38 @@
++#ifndef _LTTNG_WRAPPER_IRQ_H
++#define _LTTNG_WRAPPER_IRQ_H
++
++/*
++ * wrapper/irq.h
++ *
++ * wrapper around linux/irq.h.
++ *
++ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/version.h>
++
++/*
++ * Starting from the 3.12 Linux kernel, all architectures use the
++ * generic hard irqs system. More details can be seen at commit
++ * 0244ad004a54e39308d495fee0a2e637f8b5c317 in the Linux kernel GIT.
++ */
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0) \
++ || defined(CONFIG_GENERIC_HARDIRQS))
++# define CONFIG_LTTNG_HAS_LIST_IRQ
++#endif
++
++#endif /* _LTTNG_WRAPPER_IRQ_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/irqdesc.c
+@@ -0,0 +1,58 @@
++/*
++ * wrapper/irqdesc.c
++ *
++ * wrapper around irq_to_desc. Using KALLSYMS to get its address when
++ * available, else we need to have a kernel that exports this function to GPL
++ * modules.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifdef CONFIG_KALLSYMS
++
++#include <linux/kallsyms.h>
++#include <linux/interrupt.h>
++#include <linux/irqnr.h>
++#include "kallsyms.h"
++#include "irqdesc.h"
++
++static
++struct irq_desc *(*irq_to_desc_sym)(unsigned int irq);
++
++struct irq_desc *wrapper_irq_to_desc(unsigned int irq)
++{
++ if (!irq_to_desc_sym)
++ irq_to_desc_sym = (void *) kallsyms_lookup_funcptr("irq_to_desc");
++ if (irq_to_desc_sym) {
++ return irq_to_desc_sym(irq);
++ } else {
++ printk(KERN_WARNING "LTTng: irq_to_desc symbol lookup failed.\n");
++ return NULL;
++ }
++}
++
++#else
++
++#include <linux/interrupt.h>
++#include <linux/irqnr.h>
++
++struct irq_desc *wrapper_irq_to_desc(unsigned int irq)
++{
++ return irq_to_desc(irq);
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/irqdesc.h
+@@ -0,0 +1,33 @@
++#ifndef _LTTNG_WRAPPER_IRQDESC_H
++#define _LTTNG_WRAPPER_IRQDESC_H
++
++/*
++ * wrapper/irqdesc.h
++ *
++ * wrapper around irq_to_desc. Using KALLSYMS to get its address when
++ * available, else we need to have a kernel that exports this function to GPL
++ * modules.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/interrupt.h>
++#include <linux/irqnr.h>
++
++struct irq_desc *wrapper_irq_to_desc(unsigned int irq);
++
++#endif /* _LTTNG_WRAPPER_IRQDESC_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/kallsyms.h
+@@ -0,0 +1,51 @@
++#ifndef _LTTNG_WRAPPER_KALLSYMS_H
++#define _LTTNG_WRAPPER_KALLSYMS_H
++
++/*
++ * wrapper/kallsyms.h
++ *
++ * wrapper around kallsyms_lookup_name. Implements arch-dependent code for
++ * arches where the address of the start of the function body is different
++ * from the pointer which can be used to call the function, e.g. ARM THUMB2.
++ *
++ * Copyright (C) 2011 Avik Sil (avik.sil@linaro.org)
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/kallsyms.h>
++
++static inline
++unsigned long kallsyms_lookup_funcptr(const char *name)
++{
++ unsigned long addr;
++
++ addr = kallsyms_lookup_name(name);
++#ifdef CONFIG_ARM
++#ifdef CONFIG_THUMB2_KERNEL
++ if (addr)
++ addr |= 1; /* set bit 0 in address for thumb mode */
++#endif
++#endif
++ return addr;
++}
++
++static inline
++unsigned long kallsyms_lookup_dataptr(const char *name)
++{
++ return kallsyms_lookup_name(name);
++}
++#endif /* _LTTNG_WRAPPER_KALLSYMS_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/nsproxy.h
+@@ -0,0 +1,42 @@
++#ifndef _LTTNG_WRAPPER_NSPROXY_H
++#define _LTTNG_WRAPPER_NSPROXY_H
++
++/*
++ * wrapper/nsproxy.h
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/version.h>
++#include <linux/nsproxy.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
++static inline
++struct pid_namespace *lttng_get_proxy_pid_ns(struct nsproxy *proxy)
++{
++ return proxy->pid_ns_for_children;
++}
++#else
++static inline
++struct pid_namespace *lttng_get_proxy_pid_ns(struct nsproxy *proxy)
++{
++ return proxy->pid_ns;
++}
++#endif
++
++
++#endif /* _LTTNG_WRAPPER_NSPROXY_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/perf.h
+@@ -0,0 +1,71 @@
++#ifndef _LTTNG_WRAPPER_PERF_H
++#define _LTTNG_WRAPPER_PERF_H
++
++/*
++ * wrapper/perf.h
++ *
++ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/perf_event.h>
++
++#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
++static inline struct perf_event *
++wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
++ int cpu,
++ struct task_struct *task,
++ perf_overflow_handler_t callback)
++{
++ return perf_event_create_kernel_counter(attr, cpu, task, callback, NULL);
++}
++#else /* defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99)) */
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
++static inline struct perf_event *
++wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
++ int cpu,
++ struct task_struct *task,
++ perf_overflow_handler_t callback)
++{
++ return perf_event_create_kernel_counter(attr, cpu, task, callback);
++}
++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) */
++static inline struct perf_event *
++wrapper_perf_event_create_kernel_counter(struct perf_event_attr *attr,
++ int cpu,
++ struct task_struct *task,
++ perf_overflow_handler_t callback)
++{
++ pid_t pid;
++
++ if (!task)
++ pid = -1;
++ else
++ pid = task->pid;
++
++ return perf_event_create_kernel_counter(attr, cpu, pid, callback);
++}
++
++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
++#define local64_read(l) atomic64_read(l)
++#endif
++
++#endif /* defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99)) */
++
++#endif /* _LTTNG_WRAPPER_PERF_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/poll.h
+@@ -0,0 +1,33 @@
++#ifndef _LTTNG_WRAPPER_POLL_H
++#define _LTTNG_WRAPPER_POLL_H
++
++/*
++ * wrapper/poll.h
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/poll.h>
++
++/*
++ * Note: poll_wait_set_exclusive() is defined as a no-op. A thundering herd
++ * effect can be noticed with a large number of consumer threads.
++ */
++
++#define poll_wait_set_exclusive(poll_table)
++
++#endif /* _LTTNG_WRAPPER_POLL_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/random.c
+@@ -0,0 +1,77 @@
++/*
++ * wrapper/random.c
++ *
++ * wrapper around boot_id read. The boot id is read from
++ * /proc/sys/kernel/random/boot_id, since the kernel does not export this
++ * information directly to GPL modules.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++/* boot_id depends on sysctl */
++#if defined(CONFIG_SYSCTL)
++
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/sched.h>
++#include <linux/uaccess.h>
++#include "random.h"
++
++/*
++ * Returns string boot id.
++ */
++int wrapper_get_bootid(char *bootid)
++{
++ struct file *file;
++ int ret;
++ ssize_t len;
++ mm_segment_t old_fs;
++
++ file = filp_open("/proc/sys/kernel/random/boot_id", O_RDONLY, 0);
++ if (IS_ERR(file))
++ return PTR_ERR(file);
++
++ old_fs = get_fs();
++ set_fs(KERNEL_DS);
++
++ if (!file->f_op || !file->f_op->read) {
++ ret = -EINVAL;
++ goto end;
++ }
++
++ len = file->f_op->read(file, bootid, BOOT_ID_LEN - 1, &file->f_pos);
++ if (len != BOOT_ID_LEN - 1) {
++ ret = -EINVAL;
++ goto end;
++ }
++
++ bootid[BOOT_ID_LEN - 1] = '\0';
++ ret = 0;
++end:
++ set_fs(old_fs);
++ filp_close(file, current->files);
++ return ret;
++}
++
++#else
++
++int wrapper_get_bootid(char *bootid)
++{
++ return -ENOSYS;
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/random.h
+@@ -0,0 +1,32 @@
++#ifndef _LTTNG_WRAPPER_RANDOM_H
++#define _LTTNG_WRAPPER_RANDOM_H
++
++/*
++ * wrapper/random.h
++ *
++ * wrapper around boot_id read. The boot id is read from
++ * /proc/sys/kernel/random/boot_id, since the kernel does not export this
++ * information directly to GPL modules.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#define BOOT_ID_LEN 37
++
++int wrapper_get_bootid(char *bootid);
++
++#endif /* _LTTNG_WRAPPER_RANDOM_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/api.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/api.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/backend.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/backend.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/backend_internal.h
+@@ -0,0 +1,2 @@
++#include "../../wrapper/inline_memcpy.h"
++#include "../../lib/ringbuffer/backend_internal.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/backend_types.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/backend_types.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/config.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/config.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/frontend.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/frontend.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/frontend_api.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/frontend_api.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/frontend_internal.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/frontend_internal.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/frontend_types.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/frontend_types.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/iterator.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/iterator.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/nohz.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/nohz.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/vatomic.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/vatomic.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/ringbuffer/vfs.h
+@@ -0,0 +1 @@
++#include "../../lib/ringbuffer/vfs.h"
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/spinlock.h
+@@ -0,0 +1,47 @@
++#ifndef _LTTNG_WRAPPER_SPINLOCK_H
++#define _LTTNG_WRAPPER_SPINLOCK_H
++
++/*
++ * wrapper/spinlock.h
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
++
++#include <linux/string.h>
++
++#define raw_spin_lock_init(lock) \
++ do { \
++ raw_spinlock_t __lock = __RAW_SPIN_LOCK_UNLOCKED; \
++ memcpy(lock, &__lock, sizeof(lock)); \
++ } while (0)
++
++#define raw_spin_is_locked(lock) __raw_spin_is_locked(lock)
++
++#define wrapper_desc_spin_lock(lock) spin_lock(lock)
++#define wrapper_desc_spin_unlock(lock) spin_unlock(lock)
++
++#else
++
++#define wrapper_desc_spin_lock(lock) raw_spin_lock(lock)
++#define wrapper_desc_spin_unlock(lock) raw_spin_unlock(lock)
++
++#endif
++#endif /* _LTTNG_WRAPPER_SPINLOCK_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/splice.c
+@@ -0,0 +1,60 @@
++/*
++ * wrapper/splice.c
++ *
++ * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
++ * available, else we need to have a kernel that exports this function to GPL
++ * modules.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifdef CONFIG_KALLSYMS
++
++#include <linux/kallsyms.h>
++#include <linux/fs.h>
++#include <linux/splice.h>
++#include "kallsyms.h"
++
++static
++ssize_t (*splice_to_pipe_sym)(struct pipe_inode_info *pipe,
++ struct splice_pipe_desc *spd);
++
++ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
++ struct splice_pipe_desc *spd)
++{
++ if (!splice_to_pipe_sym)
++ splice_to_pipe_sym = (void *) kallsyms_lookup_funcptr("splice_to_pipe");
++ if (splice_to_pipe_sym) {
++ return splice_to_pipe_sym(pipe, spd);
++ } else {
++ printk(KERN_WARNING "LTTng: splice_to_pipe symbol lookup failed.\n");
++ return -ENOSYS;
++ }
++}
++
++#else
++
++#include <linux/fs.h>
++#include <linux/splice.h>
++
++ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
++ struct splice_pipe_desc *spd)
++{
++ return splice_to_pipe(pipe, spd);
++}
++
++#endif
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/splice.h
+@@ -0,0 +1,37 @@
++#ifndef _LTTNG_WRAPPER_SPLICE_H
++#define _LTTNG_WRAPPER_SPLICE_H
++
++/*
++ * wrapper/splice.h
++ *
++ * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
++ * available, else we need to have a kernel that exports this function to GPL
++ * modules.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/splice.h>
++
++ssize_t wrapper_splice_to_pipe(struct pipe_inode_info *pipe,
++ struct splice_pipe_desc *spd);
++
++#ifndef PIPE_DEF_BUFFERS
++#define PIPE_DEF_BUFFERS 16
++#endif
++
++#endif /* _LTTNG_WRAPPER_SPLICE_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/trace-clock.h
+@@ -0,0 +1,102 @@
++#ifndef _LTTNG_TRACE_CLOCK_H
++#define _LTTNG_TRACE_CLOCK_H
++
++/*
++ * wrapper/trace-clock.h
++ *
++ * Maps the LTTng trace clock to the kernel trace clock when available, or to
++ * the mainline monotonic clock otherwise. Depends on CONFIG_HIGH_RES_TIMERS=y.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifdef CONFIG_HAVE_TRACE_CLOCK
++#include <linux/trace-clock.h>
++#else /* CONFIG_HAVE_TRACE_CLOCK */
++
++#include <linux/hardirq.h>
++#include <linux/ktime.h>
++#include <linux/time.h>
++#include <linux/hrtimer.h>
++#include <linux/version.h>
++#include "../lttng-kernel-version.h"
++#include "random.h"
++
++#if LTTNG_KERNEL_RANGE(3,10,0, 3,10,14) || LTTNG_KERNEL_RANGE(3,11,0, 3,11,3)
++#error "Linux kernels 3.10 and 3.11 introduce a deadlock in the timekeeping subsystem. Fixed by commit 7bd36014460f793c19e7d6c94dab67b0afcfcb7f \"timekeeping: Fix HRTICK related deadlock from ntp lock changes\" in Linux."
++#endif
++
++static inline u64 trace_clock_monotonic_wrapper(void)
++{
++ ktime_t ktime;
++
++ /*
++ * Refuse to trace from NMIs with this wrapper, because an NMI could
++ * nest over the xtime write seqlock and deadlock.
++ */
++ if (in_nmi())
++ return (u64) -EIO;
++
++ ktime = ktime_get();
++ return ktime_to_ns(ktime);
++}
++
++static inline u32 trace_clock_read32(void)
++{
++ return (u32) trace_clock_monotonic_wrapper();
++}
++
++static inline u64 trace_clock_read64(void)
++{
++ return (u64) trace_clock_monotonic_wrapper();
++}
++
++static inline u64 trace_clock_freq(void)
++{
++ return (u64) NSEC_PER_SEC;
++}
++
++static inline int trace_clock_uuid(char *uuid)
++{
++ return wrapper_get_bootid(uuid);
++}
++
++static inline int get_trace_clock(void)
++{
++ /*
++ * LTTng: Using mainline kernel monotonic clock. NMIs will not be
++ * traced, and expect significant performance degradation compared to
++ * the LTTng trace clocks. Integration of the LTTng 0.x trace clocks
++ * into LTTng 2.0 is planned in a near future.
++ */
++ printk(KERN_WARNING "LTTng: Using mainline kernel monotonic clock.\n");
++ printk(KERN_WARNING " * NMIs will not be traced,\n");
++ printk(KERN_WARNING " * expect significant performance degradation compared to the\n");
++ printk(KERN_WARNING " LTTng trace clocks.\n");
++ printk(KERN_WARNING "Integration of the LTTng 0.x trace clocks into LTTng 2.0 is planned\n");
++ printk(KERN_WARNING "in a near future.\n");
++
++ return 0;
++}
++
++static inline void put_trace_clock(void)
++{
++}
++
++#endif /* CONFIG_HAVE_TRACE_CLOCK */
++
++#endif /* _LTTNG_TRACE_CLOCK_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/tracepoint.h
+@@ -0,0 +1,44 @@
++#ifndef _LTTNG_WRAPPER_TRACEPOINT_H
++#define _LTTNG_WRAPPER_TRACEPOINT_H
++
++/*
++ * wrapper/tracepoint.h
++ *
++ * wrapper around DECLARE_EVENT_CLASS.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/version.h>
++#include <linux/tracepoint.h>
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35))
++
++#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)
++
++#endif
++
++#ifndef HAVE_KABI_2635_TRACEPOINT
++
++#define kabi_2635_tracepoint_probe_register tracepoint_probe_register
++#define kabi_2635_tracepoint_probe_unregister tracepoint_probe_unregister
++#define kabi_2635_tracepoint_probe_register_noupdate tracepoint_probe_register_noupdate
++#define kabi_2635_tracepoint_probe_unregister_noupdate tracepoint_probe_unregister_noupdate
++
++#endif /* HAVE_KABI_2635_TRACEPOINT */
++
++#endif /* _LTTNG_WRAPPER_TRACEPOINT_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/uuid.h
+@@ -0,0 +1,43 @@
++#ifndef _LTTNG_WRAPPER_UUID_H
++#define _LTTNG_WRAPPER_UUID_H
++
++/*
++ * wrapper/uuid.h
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
++#include <linux/uuid.h>
++#else
++
++#include <linux/random.h>
++
++typedef struct {
++ __u8 b[16];
++} uuid_le;
++
++static inline
++void uuid_le_gen(uuid_le *u)
++{
++ generate_random_uuid(u->b);
++}
++
++#endif
++#endif /* _LTTNG_WRAPPER_UUID_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/vmalloc.h
+@@ -0,0 +1,63 @@
++#ifndef _LTTNG_WRAPPER_VMALLOC_H
++#define _LTTNG_WRAPPER_VMALLOC_H
++
++/*
++ * wrapper/vmalloc.h
++ *
++ * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
++ * available, else we need to have a kernel that exports this function to GPL
++ * modules.
++ *
++ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifdef CONFIG_KALLSYMS
++
++#include <linux/kallsyms.h>
++#include "kallsyms.h"
++
++static inline
++void wrapper_vmalloc_sync_all(void)
++{
++ void (*vmalloc_sync_all_sym)(void);
++
++ vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
++ if (vmalloc_sync_all_sym) {
++ vmalloc_sync_all_sym();
++ } else {
++#ifdef CONFIG_X86
++ /*
++ * Only x86 needs vmalloc_sync_all to make sure LTTng does not
++ * trigger recursive page faults.
++ */
++ printk(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
++ printk(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
++#endif
++ }
++}
++#else
++
++#include <linux/vmalloc.h>
++
++static inline
++void wrapper_vmalloc_sync_all(void)
++{
++ return vmalloc_sync_all();
++}
++#endif
++
++#endif /* _LTTNG_WRAPPER_VMALLOC_H */
+--- /dev/null
++++ b/drivers/staging/lttng/wrapper/writeback.h
+@@ -0,0 +1,61 @@
++#ifndef _LTTNG_WRAPPER_WRITEBACK_H
++#define _LTTNG_WRAPPER_WRITEBACK_H
++
++/*
++ * wrapper/writeback.h
++ *
++ * wrapper around global_dirty_limit read. Using KALLSYMS with KALLSYMS_ALL
++ * to get its address when available, else we need to have a kernel that
++ * exports this variable to GPL modules.
++ *
++ * Copyright (C) 2013 Mentor Graphics Corp.
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; only
++ * version 2.1 of the License.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
++ */
++
++#ifdef CONFIG_KALLSYMS_ALL
++
++#include <linux/kallsyms.h>
++#include "kallsyms.h"
++
++static unsigned long *global_dirty_limit_sym;
++
++static inline
++unsigned long wrapper_global_dirty_limit(void)
++{
++ if (!global_dirty_limit_sym)
++ global_dirty_limit_sym =
++ (void *) kallsyms_lookup_dataptr("global_dirty_limit");
++ if (global_dirty_limit_sym) {
++ return *global_dirty_limit_sym;
++ } else {
++ printk(KERN_WARNING "LTTng: global_dirty_limit symbol lookup failed.\n");
++ return 0;
++ }
++}
++
++#else
++
++#include <linux/writeback.h>
++
++static inline
++unsigned long wrapper_global_dirty_limit(void)
++{
++ return global_dirty_limit;
++}
++
++#endif
++
++#endif /* _LTTNG_WRAPPER_WRITEBACK_H */
diff --git a/patches.lttng/lttng-fix-module-name-lttng-relay.ko-lttng-tracer.ko.patch b/patches.lttng/lttng-fix-module-name-lttng-relay.ko-lttng-tracer.ko.patch
deleted file mode 100644
index 06be73f13a4..00000000000
--- a/patches.lttng/lttng-fix-module-name-lttng-relay.ko-lttng-tracer.ko.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From ltsi-dev-bounces@lists.linuxfoundation.org Sun Jul 8 18:24:03 2012
-From: UCHINO Satoshi <satoshi.uchino@toshiba.co.jp>
-Date: Mon, 09 Jul 2012 10:23:55 +0900 (Tokyo Standard Time)
-Subject: LTTng: fix module name: lttng-relay.ko => lttng-tracer.ko
-To: ltsi-dev@lists.linuxfoundation.org
-Cc: mathieu.desnoyers@efficios.com
-Message-ID: <20120709.102355.248509328.satoshi.uchino@toshiba.co.jp>
-
-
-It seems lttng-tools 2.0 expects lttng-tracer.ko instead of
-lttng-relay.ko.
-
-Signed-off-by: UCHINO Satoshi <satoshi.uchino@toshiba.co.jp>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- drivers/staging/lttng/Makefile | 8 ++++----
- 1 files changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/staging/lttng/Makefile b/drivers/staging/lttng/Makefile
-index 887e8a5..0f67d31 100644
---- a/drivers/staging/lttng/Makefile
-+++ b/drivers/staging/lttng/Makefile
-@@ -9,8 +9,8 @@ obj-m += lttng-ring-buffer-client-mmap-discard.o
- obj-m += lttng-ring-buffer-client-mmap-overwrite.o
- obj-m += lttng-ring-buffer-metadata-mmap-client.o
-
--obj-m += lttng-relay.o
--lttng-relay-objs := lttng-events.o lttng-abi.o \
-+obj-m += lttng-tracer.o
-+lttng-tracer-objs := lttng-events.o lttng-abi.o \
- lttng-probes.o lttng-context.o \
- lttng-context-pid.o lttng-context-procname.o \
- lttng-context-prio.o lttng-context-nice.o \
-@@ -23,11 +23,11 @@ obj-m += lttng-statedump.o
- lttng-statedump-objs := lttng-statedump-impl.o wrapper/irqdesc.o
-
- ifneq ($(CONFIG_HAVE_SYSCALL_TRACEPOINTS),)
--lttng-relay-objs += lttng-syscalls.o
-+lttng-tracer-objs += lttng-syscalls.o
- endif
-
- ifneq ($(CONFIG_PERF_EVENTS),)
--lttng-relay-objs += $(shell \
-+lttng-tracer-objs += $(shell \
- if [ $(VERSION) -ge 3 \
- -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 33 \) ] ; then \
- echo "lttng-context-perf-counters.o" ; fi;)
---
-1.7.4.1
-
-_______________________________________________
-LTSI-dev mailing list
-LTSI-dev@lists.linuxfoundation.org
-https://lists.linuxfoundation.org/mailman/listinfo/ltsi-dev
-
diff --git a/patches.lttng/lttng-fix-reference-to-obsolete-rt-kconfig-variable.patch b/patches.lttng/lttng-fix-reference-to-obsolete-rt-kconfig-variable.patch
deleted file mode 100644
index 0f8501575bc..00000000000
--- a/patches.lttng/lttng-fix-reference-to-obsolete-rt-kconfig-variable.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From ltsi-dev-bounces@lists.linuxfoundation.org Fri Feb 22 12:15:13 2013
-From: Paul Gortmaker <paul.gortmaker@windriver.com>
-Date: Fri, 22 Feb 2013 14:32:42 -0500
-Subject: lttng: fix reference to obsolete RT Kconfig variable.
-To: <ltsi-dev@lists.linuxfoundation.org>
-Message-ID: <1361561562-5136-1-git-send-email-paul.gortmaker@windriver.com>
-
-
-The preempt-rt patches no longer use CONFIG_PREEMPT_RT in
-the 3.4 (and newer) versions. So even though LTSI doesn't
-include RT, having this define present can lead to an
-easy-to-overlook bug for anyone who does try to layer RT onto
-the LTSI baseline.
-
-Update it to use the define name currently used by RT.
-
-Reported-by: Jim Somerville <Jim.Somerville@windriver.com>
-Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-
----
-
-[caused by LTSI patch patches.lttng/lttng-update-to-v2.0.1.patch]
-
- drivers/staging/lttng/lttng-events.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/staging/lttng/lttng-events.c
-+++ b/drivers/staging/lttng/lttng-events.c
-@@ -51,7 +51,7 @@ int _lttng_session_metadata_statedump(st
- void synchronize_trace(void)
- {
- synchronize_sched();
--#ifdef CONFIG_PREEMPT_RT
-+#ifdef CONFIG_PREEMPT_RT_FULL
- synchronize_rcu();
- #endif
- }
-
diff --git a/patches.lttng/lttng-update-2.0.1-to-2.0.4.patch b/patches.lttng/lttng-update-2.0.1-to-2.0.4.patch
deleted file mode 100644
index 7fb335cf0ce..00000000000
--- a/patches.lttng/lttng-update-2.0.1-to-2.0.4.patch
+++ /dev/null
@@ -1,204 +0,0 @@
-From compudj@mail.openrapids.net Tue Jun 26 23:52:58 2012
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Wed, 27 Jun 2012 02:52:55 -0400
-Subject: [LTSI staging] LTTng staging update 2.0.1 to 2.0.4
-To: Greg KH <gregkh@linuxfoundation.org>
-Cc: ltsi-dev@lists.linuxfoundation.org
-Message-ID: <20120627065255.GA6293@Krystal>
-Content-Disposition: inline
-
-
-Update LTTng in LTSI staging from lttng-modules 2.0.1 to 2.0.4.
-(available at: git://git.lttng.org/lttng-modules.git)
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
- drivers/staging/lttng/README | 43 +++++++++-
- drivers/staging/lttng/instrumentation/events/lttng-module/signal.h | 37 ++++++++
- drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh | 2
- drivers/staging/lttng/lttng-events.c | 2
- drivers/staging/lttng/lttng-tracer.h | 2
- drivers/staging/lttng/probes/lttng-events.h | 2
- drivers/staging/lttng/probes/lttng-probe-statedump.c | 1
- 7 files changed, 80 insertions(+), 9 deletions(-)
-
---- a/drivers/staging/lttng/README
-+++ b/drivers/staging/lttng/README
-@@ -1,7 +1,7 @@
- LTTng 2.0 modules
-
- Mathieu Desnoyers
--February 8, 2012
-+April 6, 2012
-
- LTTng 2.0 kernel modules build against a vanilla or distribution kernel, without
- need for additional patches. Other features:
-@@ -37,9 +37,44 @@ LTTng 0.x patchset, but the lttng-module
- 0.x, so both tracers cannot be installed at the same time for a given
- kernel version.
-
--LTTng-modules depends on having kallsyms enabled in the kernel it is
--built against. Ideally, if you want to have system call tracing, the
--"Trace Syscalls" feature should be enabled too.
-+
-+* Kernel config options required
-+
-+CONFIG_MODULES: required
-+ * Kernel modules support.
-+CONFIG_KALLSYMS: required
-+ * See wrapper/ files. This is necessary until the few required missing
-+ symbols are exported to GPL modules from mainline.
-+CONFIG_HIGH_RES_TIMERS: required
-+ * Needed for LTTng 2.0 clock source.
-+CONFIG_TRACEPOINTS: required
-+ kernel tracepoint instrumentation
-+ * Enabled as side-effect of any of the perf/ftrace/blktrace
-+ instrumentation features.
-+
-+
-+* Kernel config options supported (optional)
-+
-+The following kernel configuration options will affect the features
-+available from LTTng:
-+
-+
-+CONFIG_HAVE_SYSCALL_TRACEPOINTS:
-+ system call tracing
-+ lttng enable-event -k --syscall
-+ lttng enable-event -k -a
-+CONFIG_PERF_EVENTS:
-+ performance counters
-+ lttng add-context -t perf:*
-+CONFIG_EVENT_TRACING:
-+ needed to allow block layer tracing
-+CONFIG_KPROBES:
-+ Dynamic probe.
-+ lttng enable-event -k --probe ...
-+CONFIG_KRETPROBES:
-+ Dynamic function entry/return probe.
-+ lttng enable-event -k --function ...
-+
-
- * Note about Perf PMU counters support
-
---- a/drivers/staging/lttng/instrumentation/events/lttng-module/signal.h
-+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/signal.h
-@@ -5,6 +5,7 @@
- #define _TRACE_SIGNAL_H
-
- #include <linux/tracepoint.h>
-+#include <linux/version.h>
-
- #ifndef _TRACE_SIGNAL_DEF
- #define _TRACE_SIGNAL_DEF
-@@ -34,6 +35,7 @@
- * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV
- * means that si_code is SI_KERNEL.
- */
-+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0))
- TRACE_EVENT(signal_generate,
-
- TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
-@@ -44,7 +46,7 @@ TRACE_EVENT(signal_generate,
- __field( int, sig )
- __field( int, errno )
- __field( int, code )
-- __array( char, comm, TASK_COMM_LEN )
-+ __array_text( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- ),
-
-@@ -59,6 +61,39 @@ TRACE_EVENT(signal_generate,
- __entry->sig, __entry->errno, __entry->code,
- __entry->comm, __entry->pid)
- )
-+#else
-+TRACE_EVENT(signal_generate,
-+
-+ TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
-+ int group, int result),
-+
-+ TP_ARGS(sig, info, task, group, result),
-+
-+ TP_STRUCT__entry(
-+ __field( int, sig )
-+ __field( int, errno )
-+ __field( int, code )
-+ __array_text( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ __field( int, group )
-+ __field( int, result )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(sig, sig)
-+ TP_STORE_SIGINFO(info)
-+ tp_memcpy(comm, task->comm, TASK_COMM_LEN)
-+ tp_assign(pid, task->pid)
-+ tp_assign(group, group)
-+ tp_assign(result, result)
-+ ),
-+
-+ TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
-+ __entry->sig, __entry->errno, __entry->code,
-+ __entry->comm, __entry->pid, __entry->group,
-+ __entry->result)
-+)
-+#endif
-
- /**
- * signal_deliver - called when a signal is delivered
---- a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh
-+++ b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-generate-headers.sh
-@@ -59,7 +59,7 @@ if [ "$CLASS" = integers ]; then
-
- NRARGS=0
-
--echo \
-+echo -e \
- 'SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,\n'\
- ' TP_STRUCT__entry(),\n'\
- ' TP_fast_assign(),\n'\
---- a/drivers/staging/lttng/lttng-events.c
-+++ b/drivers/staging/lttng/lttng-events.c
-@@ -292,7 +292,7 @@ struct lttng_event *lttng_event_create(s
- int ret;
-
- mutex_lock(&sessions_mutex);
-- if (chan->free_event_id == -1UL)
-+ if (chan->free_event_id == -1U)
- goto full;
- /*
- * This is O(n^2) (for each event, the loop is called at event
---- a/drivers/staging/lttng/lttng-tracer.h
-+++ b/drivers/staging/lttng/lttng-tracer.h
-@@ -40,7 +40,7 @@
-
- #define LTTNG_MODULES_MAJOR_VERSION 2
- #define LTTNG_MODULES_MINOR_VERSION 0
--#define LTTNG_MODULES_PATCHLEVEL_VERSION 1
-+#define LTTNG_MODULES_PATCHLEVEL_VERSION 4
-
- #define LTTNG_VERSION_NAME "Annedd'ale"
- #define LTTNG_VERSION_DESCRIPTION \
---- a/drivers/staging/lttng/probes/lttng-events.h
-+++ b/drivers/staging/lttng/probes/lttng-events.h
-@@ -18,7 +18,7 @@
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
--
-+#include <linux/uaccess.h>
- #include <linux/debugfs.h>
- #include "lttng.h"
- #include "lttng-types.h"
---- a/drivers/staging/lttng/probes/lttng-probe-statedump.c
-+++ b/drivers/staging/lttng/probes/lttng-probe-statedump.c
-@@ -27,6 +27,7 @@
- #include <linux/ip.h>
- #include <linux/netdevice.h>
- #include <linux/inetdevice.h>
-+#include <linux/sched.h>
- #include "../lttng-events.h"
-
- /*
diff --git a/patches.lttng/lttng-update-to-v2.0.1.patch b/patches.lttng/lttng-update-to-v2.0.1.patch
deleted file mode 100644
index e12c0f0510e..00000000000
--- a/patches.lttng/lttng-update-to-v2.0.1.patch
+++ /dev/null
@@ -1,14289 +0,0 @@
-From compudj@mail.openrapids.net Thu Apr 5 07:40:26 2012
-From: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Date: Thu, 5 Apr 2012 10:40:20 -0400
-Subject: [PATCH LTSI staging] LTTng update to v2.0.1
-To: Greg KH <gregkh@linuxfoundation.org>
-Cc: Tim Bird <tim.bird@am.sony.com>, ltsi-dev@lists.linuxfoundation.org
-Message-ID: <20120405144020.GA31482@Krystal>
-Content-Disposition: inline
-
-
-Update LTTng driver (in LTSI tree) to v2.0.1 from
-git://git.lttng.org/lttng-modules.git, the external module package on which
-development has continued.
-
-Changelog:
-
-2012-03-29 LTTng modules 2.0.1
- * Fix: is_compat_task !CONFIG_COMPAT compile error on kernels >= 3.3
-
-2012-03-20 LTTng modules 2.0.0
- * First STABLE version
- * Add version name
-
-2012-03-20 LTTng modules 2.0.0-rc4
- * Update README and add version name place-holder
-
-2012-03-16 LTTng modules 2.0.0-rc3
- * Fix clock offset 32-bit multiplication overflow
- * Fix: wrong assignment of fd in state dump
- * License cleanup, ifdef namespace cleanup
- * Fix: ensure power of 2 check handles 64-bit size_t entirely
-
-2012-03-02 LTTng modules 2.0.0-rc2
- * Fix: dmesg printout should not print metadata warnings
- * Fix: use transport name as channel name
- * Fix: Return -EINVAL instead of print warning if non power of 2 size/num_subbuf
-
-2012-02-20 LTTng modules 2.0.0-rc1
- * Standardize version across toolchain
- * statedump: Use old macro name for kernel 2.6.38
-
-2012-02-16 LTTng modules 2.0-pre15
- * Add timer instrumentation
- * fix: need to undef mainline define
- * fix: Include signal.h instead of irq.h for prototype match check
- * Add signal instrumentation
-
-2012-02-16 LTTng modules 2.0-pre14
- * syscall tracing: sys_getcpu
- * Add sys_clone x86 instrumentation
- * statedump: fix include circular dep
- * Implement state dump
-
-2012-02-09 LTTng modules 2.0-pre13
- * Update README
- * environment: write sysname, release, version, domain to metadata
- * Allow open /proc/lttng for read & write
-
-2012-02-02 LTTng modules 2.0-pre12
- * Add x86 32/64 execve syscall instrumentation override
- * Remove unused defines
- * Add padding to ABI
- * Use LTTNG_KERNEL_SYM_NAME_LEN
- * Update version to 1.9.9
- * Add missing double-quotes to clock uuid
- * clock: read bootid as clock monotonic ID
- * Fix comment
- * Cleanup comment
- * clock: output clock description in metadata
- * Properly fix the timekeeping overflow detection
- * Fix init bug
- * rename lib-ring-buffer to lttng-lib-ring-buffer
- * Remove #warning
- * Mass rename: ltt_*/ltt-* to LTTNG_*/LTTNG-*
- * Update TODO
- * Update TODO
- * Remove debugfs file (keep only proc file)
- * Rename lttng-debugfs-abi files to lttng-abi
-
-2011-12-13 LTTng modules 2.0-pre11
- * Fix OOPS caused by reference of config pointer
- * Gather detailed info from x86 64 32-bit compat syscall instrumentation
- * lttng lib: ring buffer move null pointer check to open
- * lttng lib: ring buffer remove duplicate null pointer
- * lttng lib: ring buffer: remove stale null-pointer
- * lttng wrapper: add missing include to kallsyms wrapper
- * lttng: cleanup one-bit signed bitfields
- * Add TODO file
- * Update symbol name length max size to 256
- * Fix last modifications to string_from_user operations
- * Document that depmod needs to be executed by hand
- * Fix strlen_user fault space reservation
- * Fix tp_copy_string_from_user handling of faults
- * Disable block layer tracing support for kernels < 2.6.38
- * lttng context: perf counter, fix 32-bit vs 64-bit field size bug
- * Update trace clock warning to match the current development plan
- * ringbuffer: make ring buffer printk less verbose
- * Makefile: do not run depmod manually
-
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-CC: Greg KH <gregkh@linuxfoundation.org>
-CC: Tim Bird <tim.bird@am.sony.com>
-CC: ltsi-dev@lists.linuxfoundation.org
----
- drivers/staging/lttng/Makefile | 28
- drivers/staging/lttng/README | 43
- drivers/staging/lttng/TODO | 18
- drivers/staging/lttng/instrumentation/events/lttng-module/lttng-statedump.h | 162 +
- drivers/staging/lttng/instrumentation/events/lttng-module/signal.h | 165 +
- drivers/staging/lttng/instrumentation/events/lttng-module/timer.h | 333 ++
- drivers/staging/lttng/instrumentation/events/mainline/signal.h | 166 +
- drivers/staging/lttng/instrumentation/events/mainline/timer.h | 329 ++
- drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h | 53
- drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h | 46
- drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h | 3
- drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h | 7
- drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c | 18
- drivers/staging/lttng/lib/Makefile | 4
- drivers/staging/lttng/lib/align.h | 16
- drivers/staging/lttng/lib/bitfield.h | 2
- drivers/staging/lttng/lib/bug.h | 16
- drivers/staging/lttng/lib/ringbuffer/api.h | 26
- drivers/staging/lttng/lib/ringbuffer/backend.h | 26
- drivers/staging/lttng/lib/ringbuffer/backend_internal.h | 26
- drivers/staging/lttng/lib/ringbuffer/backend_types.h | 33
- drivers/staging/lttng/lib/ringbuffer/config.h | 26
- drivers/staging/lttng/lib/ringbuffer/frontend.h | 28
- drivers/staging/lttng/lib/ringbuffer/frontend_api.h | 28
- drivers/staging/lttng/lib/ringbuffer/frontend_internal.h | 26
- drivers/staging/lttng/lib/ringbuffer/frontend_types.h | 34
- drivers/staging/lttng/lib/ringbuffer/iterator.h | 28
- drivers/staging/lttng/lib/ringbuffer/nohz.h | 24
- drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c | 60
- drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c | 112
- drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c | 46
- drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c | 29
- drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c | 23
- drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c | 24
- drivers/staging/lttng/lib/ringbuffer/vatomic.h | 24
- drivers/staging/lttng/lib/ringbuffer/vfs.h | 28
- drivers/staging/lttng/ltt-context.c | 93
- drivers/staging/lttng/ltt-debugfs-abi.c | 777 ------
- drivers/staging/lttng/ltt-debugfs-abi.h | 153 -
- drivers/staging/lttng/ltt-endian.h | 31
- drivers/staging/lttng/ltt-events.c | 1009 --------
- drivers/staging/lttng/ltt-events.h | 452 ----
- drivers/staging/lttng/ltt-probes.c | 164 -
- drivers/staging/lttng/ltt-ring-buffer-client-discard.c | 21
- drivers/staging/lttng/ltt-ring-buffer-client-mmap-discard.c | 21
- drivers/staging/lttng/ltt-ring-buffer-client-mmap-overwrite.c | 21
- drivers/staging/lttng/ltt-ring-buffer-client-overwrite.c | 21
- drivers/staging/lttng/ltt-ring-buffer-client.h | 569 -----
- drivers/staging/lttng/ltt-ring-buffer-metadata-client.c | 21
- drivers/staging/lttng/ltt-ring-buffer-metadata-client.h | 330 --
- drivers/staging/lttng/ltt-ring-buffer-metadata-mmap-client.c | 21
- drivers/staging/lttng/ltt-tracer-core.h | 28
- drivers/staging/lttng/ltt-tracer.h | 67
- drivers/staging/lttng/lttng-abi.c | 781 ++++++
- drivers/staging/lttng/lttng-abi.h | 176 +
- drivers/staging/lttng/lttng-calibrate.c | 22
- drivers/staging/lttng/lttng-context-nice.c | 31
- drivers/staging/lttng/lttng-context-perf-counters.c | 31
- drivers/staging/lttng/lttng-context-pid.c | 31
- drivers/staging/lttng/lttng-context-ppid.c | 31
- drivers/staging/lttng/lttng-context-prio.c | 31
- drivers/staging/lttng/lttng-context-procname.c | 27
- drivers/staging/lttng/lttng-context-tid.c | 31
- drivers/staging/lttng/lttng-context-vpid.c | 31
- drivers/staging/lttng/lttng-context-vppid.c | 31
- drivers/staging/lttng/lttng-context-vtid.c | 31
- drivers/staging/lttng/lttng-context.c | 105
- drivers/staging/lttng/lttng-endian.h | 43
- drivers/staging/lttng/lttng-events.c | 1126 ++++++++++
- drivers/staging/lttng/lttng-events.h | 466 ++++
- drivers/staging/lttng/lttng-probes.c | 176 +
- drivers/staging/lttng/lttng-ring-buffer-client-discard.c | 33
- drivers/staging/lttng/lttng-ring-buffer-client-mmap-discard.c | 33
- drivers/staging/lttng/lttng-ring-buffer-client-mmap-overwrite.c | 33
- drivers/staging/lttng/lttng-ring-buffer-client-overwrite.c | 33
- drivers/staging/lttng/lttng-ring-buffer-client.h | 598 +++++
- drivers/staging/lttng/lttng-ring-buffer-metadata-client.c | 33
- drivers/staging/lttng/lttng-ring-buffer-metadata-client.h | 342 +++
- drivers/staging/lttng/lttng-ring-buffer-metadata-mmap-client.c | 33
- drivers/staging/lttng/lttng-statedump-impl.c | 385 +++
- drivers/staging/lttng/lttng-syscalls.c | 69
- drivers/staging/lttng/lttng-tracer-core.h | 41
- drivers/staging/lttng/lttng-tracer.h | 80
- drivers/staging/lttng/probes/Makefile | 4
- drivers/staging/lttng/probes/define_trace.h | 16
- drivers/staging/lttng/probes/lttng-events-reset.h | 16
- drivers/staging/lttng/probes/lttng-events.h | 79
- drivers/staging/lttng/probes/lttng-ftrace.c | 43
- drivers/staging/lttng/probes/lttng-kprobes.c | 47
- drivers/staging/lttng/probes/lttng-kretprobes.c | 47
- drivers/staging/lttng/probes/lttng-probe-block.c | 18
- drivers/staging/lttng/probes/lttng-probe-irq.c | 18
- drivers/staging/lttng/probes/lttng-probe-kvm.c | 18
- drivers/staging/lttng/probes/lttng-probe-lttng.c | 18
- drivers/staging/lttng/probes/lttng-probe-sched.c | 18
- drivers/staging/lttng/probes/lttng-probe-signal.c | 42
- drivers/staging/lttng/probes/lttng-probe-statedump.c | 45
- drivers/staging/lttng/probes/lttng-probe-timer.c | 43
- drivers/staging/lttng/probes/lttng-type-list.h | 16
- drivers/staging/lttng/probes/lttng-types.c | 20
- drivers/staging/lttng/probes/lttng-types.h | 24
- drivers/staging/lttng/probes/lttng.h | 16
- drivers/staging/lttng/wrapper/ftrace.h | 24
- drivers/staging/lttng/wrapper/inline_memcpy.h | 16
- drivers/staging/lttng/wrapper/irqdesc.c | 58
- drivers/staging/lttng/wrapper/irqdesc.h | 33
- drivers/staging/lttng/wrapper/kallsyms.h | 45
- drivers/staging/lttng/wrapper/perf.h | 24
- drivers/staging/lttng/wrapper/poll.h | 24
- drivers/staging/lttng/wrapper/random.c | 77
- drivers/staging/lttng/wrapper/random.h | 32
- drivers/staging/lttng/wrapper/spinlock.h | 24
- drivers/staging/lttng/wrapper/splice.c | 20
- drivers/staging/lttng/wrapper/splice.h | 26
- drivers/staging/lttng/wrapper/trace-clock.h | 41
- drivers/staging/lttng/wrapper/uuid.h | 24
- drivers/staging/lttng/wrapper/vmalloc.h | 24
- 117 files changed, 7456 insertions(+), 4357 deletions(-)
-
---- a/drivers/staging/lttng/Makefile
-+++ b/drivers/staging/lttng/Makefile
-@@ -2,28 +2,32 @@
- # Makefile for the LTTng modules.
- #
-
--obj-m += ltt-ring-buffer-client-discard.o
--obj-m += ltt-ring-buffer-client-overwrite.o
--obj-m += ltt-ring-buffer-metadata-client.o
--obj-m += ltt-ring-buffer-client-mmap-discard.o
--obj-m += ltt-ring-buffer-client-mmap-overwrite.o
--obj-m += ltt-ring-buffer-metadata-mmap-client.o
-+obj-m += lttng-ring-buffer-client-discard.o
-+obj-m += lttng-ring-buffer-client-overwrite.o
-+obj-m += lttng-ring-buffer-metadata-client.o
-+obj-m += lttng-ring-buffer-client-mmap-discard.o
-+obj-m += lttng-ring-buffer-client-mmap-overwrite.o
-+obj-m += lttng-ring-buffer-metadata-mmap-client.o
-
--obj-m += ltt-relay.o
--ltt-relay-objs := ltt-events.o ltt-debugfs-abi.o \
-- ltt-probes.o ltt-context.o \
-+obj-m += lttng-relay.o
-+lttng-relay-objs := lttng-events.o lttng-abi.o \
-+ lttng-probes.o lttng-context.o \
- lttng-context-pid.o lttng-context-procname.o \
- lttng-context-prio.o lttng-context-nice.o \
- lttng-context-vpid.o lttng-context-tid.o \
- lttng-context-vtid.o lttng-context-ppid.o \
-- lttng-context-vppid.o lttng-calibrate.o
-+ lttng-context-vppid.o lttng-calibrate.o \
-+ wrapper/random.o
-+
-+obj-m += lttng-statedump.o
-+lttng-statedump-objs := lttng-statedump-impl.o wrapper/irqdesc.o
-
- ifneq ($(CONFIG_HAVE_SYSCALL_TRACEPOINTS),)
--ltt-relay-objs += lttng-syscalls.o
-+lttng-relay-objs += lttng-syscalls.o
- endif
-
- ifneq ($(CONFIG_PERF_EVENTS),)
--ltt-relay-objs += $(shell \
-+lttng-relay-objs += $(shell \
- if [ $(VERSION) -ge 3 \
- -o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 33 \) ] ; then \
- echo "lttng-context-perf-counters.o" ; fi;)
---- a/drivers/staging/lttng/README
-+++ b/drivers/staging/lttng/README
-@@ -1,10 +1,10 @@
- LTTng 2.0 modules
-
- Mathieu Desnoyers
--November 1st, 2011
-+February 8, 2012
-
--LTTng 2.0 kernel modules is currently part of the Linux kernel staging
--tree. It features (new features since LTTng 0.x):
-+LTTng 2.0 kernel modules build against a vanilla or distribution kernel, without
-+need for additional patches. Other features:
-
- - Produces CTF (Common Trace Format) natively,
- (http://www.efficios.com/ctf)
-@@ -17,28 +17,29 @@ tree. It features (new features since LT
- optional, specified on a per-tracing-session basis (except for
- timestamp and event id, which are mandatory).
-
--To build and install, you need to select "Staging" modules, and the
--LTTng kernel tracer.
-+To build and install, you will need to enable LTTng in your kernel
-+configuration.
-
--Use lttng-tools to control the tracer. LTTng tools should automatically
--load the kernel modules when needed. Use Babeltrace to print traces as a
-+Use lttng-tools to control the tracer. LTTng tools should automatically load
-+the kernel modules when needed. Use Babeltrace to print traces as a
- human-readable text log. These tools are available at the following URL:
- http://lttng.org/lttng2.0
-
--Please note that the LTTng-UST 2.0 (user-space tracing counterpart of
--LTTng 2.0) is now ready to be used, but still only available from the
--git repository.
--
--So far, it has been tested on vanilla Linux kernels 2.6.38, 2.6.39 and
--3.0 (on x86 32/64-bit, and powerpc 32-bit at the moment, build tested on
--ARM). It should work fine with newer kernels and other architectures,
--but expect build issues with kernels older than 2.6.36. The clock source
--currently used is the standard gettimeofday (slower, less scalable and
--less precise than the LTTng 0.x clocks). Support for LTTng 0.x clocks
--will be added back soon into LTTng 2.0. Please note that lttng-modules
--2.0 can build on a Linux kernel patched with the LTTng 0.x patchset, but
--the lttng-modules 2.0 replace the lttng-modules 0.x, so both tracers
--cannot be installed at the same time for a given kernel version.
-+So far, it has been tested on vanilla Linux kernels 2.6.38, 2.6.39, 3.0,
-+3.1, 3.2, 3.3 (on x86 32/64-bit, and powerpc 32-bit at the moment, build
-+tested on ARM). It should work fine with newer kernels and other
-+architectures, but expect build issues with kernels older than 2.6.36.
-+The clock source currently used is the standard gettimeofday (slower,
-+less scalable and less precise than the LTTng 0.x clocks). Support for
-+LTTng 0.x clocks will be added back soon into LTTng 2.0. Please note
-+that lttng-modules 2.0 can build on a Linux kernel patched with the
-+LTTng 0.x patchset, but the lttng-modules 2.0 replace the lttng-modules
-+0.x, so both tracers cannot be installed at the same time for a given
-+kernel version.
-+
-+LTTng-modules depends on having kallsyms enabled in the kernel it is
-+built against. Ideally, if you want to have system call tracing, the
-+"Trace Syscalls" feature should be enabled too.
-
- * Note about Perf PMU counters support
-
---- a/drivers/staging/lttng/TODO
-+++ b/drivers/staging/lttng/TODO
-@@ -10,20 +10,7 @@ TODO:
-
- A) Cleanup/Testing
-
-- 1) Remove debugfs "lttng" file (keep only procfs "lttng" file).
-- The rationale for this is that this file is needed for
-- user-level tracing support (LTTng-UST 2.0) intended to be
-- used on production system, and therefore should be present as
-- part of a "usually mounted" filesystem rather than a debug
-- filesystem.
--
-- 2) Cleanup wrappers. The drivers/staging/lttng/wrapper directory
-- contains various wrapper headers that use kallsyms lookups to
-- work around some missing EXPORT_SYMBOL_GPL() in the mainline
-- kernel. Ideally, those few symbols should become exported to
-- modules by the kernel.
--
-- 3) Test lib ring buffer snapshot feature.
-+ 1) Test lib ring buffer snapshot feature.
- When working on the lttngtop project, Julien Desfossez
- reported that he needed to push the consumer position
-		forward explicitly with lib_ring_buffer_put_next_subbuf.
-@@ -70,7 +57,6 @@ B) Features
-
- 3) Integrate the "statedump" module from LTTng 0.x into LTTng
- 2.0.
-- * Dependency: addition of "dynamic enumerations" type to CTF.
- See: http://git.lttng.org/?p=lttng-modules.git;a=shortlog;h=refs/heads/v0.19-stable
- ltt-statedump.c
-
-@@ -107,7 +93,7 @@ B) Features
- allow integration between NOHZ and LTTng would be to add
- support for such notifiers into NOHZ kernel infrastructure.
-
-- 10) Turn drivers/staging/lttng/ltt-probes.c probe_list into a
-+ 10) Turn lttng-probes.c probe_list into a
- hash table. Turns O(n^2) trace systems registration (cost
- for n systems) into O(n). (O(1) per system)
-
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/lttng-statedump.h
-@@ -0,0 +1,162 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM lttng_statedump
-+
-+#if !defined(_TRACE_LTTNG_STATEDUMP_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_LTTNG_STATEDUMP_H
-+
-+#include <linux/tracepoint.h>
-+
-+TRACE_EVENT(lttng_statedump_start,
-+ TP_PROTO(struct lttng_session *session),
-+ TP_ARGS(session),
-+ TP_STRUCT__entry(
-+ ),
-+ TP_fast_assign(
-+ ),
-+ TP_printk("")
-+)
-+
-+TRACE_EVENT(lttng_statedump_end,
-+ TP_PROTO(struct lttng_session *session),
-+ TP_ARGS(session),
-+ TP_STRUCT__entry(
-+ ),
-+ TP_fast_assign(
-+ ),
-+ TP_printk("")
-+)
-+
-+TRACE_EVENT(lttng_statedump_process_state,
-+ TP_PROTO(struct lttng_session *session,
-+ struct task_struct *p,
-+ int type, int mode, int submode, int status),
-+ TP_ARGS(session, p, type, mode, submode, status),
-+ TP_STRUCT__entry(
-+ __field(pid_t, tid)
-+ __field(pid_t, vtid)
-+ __field(pid_t, pid)
-+ __field(pid_t, vpid)
-+ __field(pid_t, ppid)
-+ __field(pid_t, vppid)
-+ __array_text(char, name, TASK_COMM_LEN)
-+ __field(int, type)
-+ __field(int, mode)
-+ __field(int, submode)
-+ __field(int, status)
-+ ),
-+ TP_fast_assign(
-+ tp_assign(tid, p->pid)
-+ tp_assign(vtid, !p->nsproxy ? 0 : task_pid_vnr(p))
-+ tp_assign(pid, p->tgid)
-+ tp_assign(vpid, !p->nsproxy ? 0 : task_tgid_vnr(p))
-+ tp_assign(ppid,
-+ ({
-+ pid_t ret;
-+
-+ rcu_read_lock();
-+ ret = task_tgid_nr(p->real_parent);
-+ rcu_read_unlock();
-+ ret;
-+ }))
-+ tp_assign(vppid,
-+ ({
-+ struct task_struct *parent;
-+ pid_t ret;
-+
-+ rcu_read_lock();
-+ parent = rcu_dereference(current->real_parent);
-+ if (!parent->nsproxy)
-+ ret = 0;
-+ else
-+ ret = task_tgid_nr(parent);
-+ rcu_read_unlock();
-+ ret;
-+ }))
-+ tp_memcpy(name, p->comm, TASK_COMM_LEN)
-+ tp_assign(type, type)
-+ tp_assign(mode, mode)
-+ tp_assign(submode, submode)
-+ tp_assign(status, status)
-+ ),
-+ TP_printk("")
-+)
-+
-+TRACE_EVENT(lttng_statedump_file_descriptor,
-+ TP_PROTO(struct lttng_session *session,
-+ struct task_struct *p, int fd, const char *filename),
-+ TP_ARGS(session, p, fd, filename),
-+ TP_STRUCT__entry(
-+ __field(pid_t, pid)
-+ __field(int, fd)
-+ __string(filename, filename)
-+ ),
-+ TP_fast_assign(
-+ tp_assign(pid, p->tgid)
-+ tp_assign(fd, fd)
-+ tp_strcpy(filename, filename)
-+ ),
-+ TP_printk("")
-+)
-+
-+TRACE_EVENT(lttng_statedump_vm_map,
-+ TP_PROTO(struct lttng_session *session,
-+ struct task_struct *p, struct vm_area_struct *map,
-+ unsigned long inode),
-+ TP_ARGS(session, p, map, inode),
-+ TP_STRUCT__entry(
-+ __field(pid_t, pid)
-+ __field_hex(unsigned long, start)
-+ __field_hex(unsigned long, end)
-+ __field_hex(unsigned long, flags)
-+ __field(unsigned long, inode)
-+ __field(unsigned long, pgoff)
-+ ),
-+ TP_fast_assign(
-+ tp_assign(pid, p->tgid)
-+ tp_assign(start, map->vm_start)
-+ tp_assign(end, map->vm_end)
-+ tp_assign(flags, map->vm_flags)
-+ tp_assign(inode, inode)
-+ tp_assign(pgoff, map->vm_pgoff << PAGE_SHIFT)
-+ ),
-+ TP_printk("")
-+)
-+
-+TRACE_EVENT(lttng_statedump_network_interface,
-+ TP_PROTO(struct lttng_session *session,
-+ struct net_device *dev, struct in_ifaddr *ifa),
-+ TP_ARGS(session, dev, ifa),
-+ TP_STRUCT__entry(
-+ __string(name, dev->name)
-+ __field_network_hex(uint32_t, address_ipv4)
-+ ),
-+ TP_fast_assign(
-+ tp_strcpy(name, dev->name)
-+ tp_assign(address_ipv4, ifa ? ifa->ifa_address : 0U)
-+ ),
-+ TP_printk("")
-+)
-+
-+/* Called with desc->lock held */
-+TRACE_EVENT(lttng_statedump_interrupt,
-+ TP_PROTO(struct lttng_session *session,
-+ unsigned int irq, const char *chip_name,
-+ struct irqaction *action),
-+ TP_ARGS(session, irq, chip_name, action),
-+ TP_STRUCT__entry(
-+ __field(unsigned int, irq)
-+ __string(name, chip_name)
-+ __string(action, action->name)
-+ ),
-+ TP_fast_assign(
-+ tp_assign(irq, irq)
-+ tp_strcpy(name, chip_name)
-+ tp_strcpy(action, action->name)
-+ ),
-+ TP_printk("")
-+)
-+
-+#endif /* _TRACE_LTTNG_STATEDUMP_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/signal.h
-@@ -0,0 +1,165 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM signal
-+
-+#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_SIGNAL_H
-+
-+#include <linux/tracepoint.h>
-+
-+#ifndef _TRACE_SIGNAL_DEF
-+#define _TRACE_SIGNAL_DEF
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#undef TP_STORE_SIGINFO
-+#define TP_STORE_SIGINFO(info) \
-+ tp_assign(errno, \
-+ (info == SEND_SIG_NOINFO || info == SEND_SIG_FORCED || info == SEND_SIG_PRIV) ? \
-+ 0 : \
-+ info->si_errno) \
-+ tp_assign(code, \
-+ (info == SEND_SIG_NOINFO || info == SEND_SIG_FORCED) ? \
-+ SI_USER : \
-+ ((info == SEND_SIG_PRIV) ? SI_KERNEL : info->si_code))
-+#endif /* _TRACE_SIGNAL_DEF */
-+
-+/**
-+ * signal_generate - called when a signal is generated
-+ * @sig: signal number
-+ * @info: pointer to struct siginfo
-+ * @task: pointer to struct task_struct
-+ *
-+ * The current process sends a 'sig' signal to the 'task' process with
-+ * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
-+ * 'info' is not a pointer and you can't access its fields. Instead,
-+ * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV
-+ * means that si_code is SI_KERNEL.
-+ */
-+TRACE_EVENT(signal_generate,
-+
-+ TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
-+
-+ TP_ARGS(sig, info, task),
-+
-+ TP_STRUCT__entry(
-+ __field( int, sig )
-+ __field( int, errno )
-+ __field( int, code )
-+ __array( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(sig, sig)
-+ TP_STORE_SIGINFO(info)
-+ tp_memcpy(comm, task->comm, TASK_COMM_LEN)
-+ tp_assign(pid, task->pid)
-+ ),
-+
-+ TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
-+ __entry->sig, __entry->errno, __entry->code,
-+ __entry->comm, __entry->pid)
-+)
-+
-+/**
-+ * signal_deliver - called when a signal is delivered
-+ * @sig: signal number
-+ * @info: pointer to struct siginfo
-+ * @ka: pointer to struct k_sigaction
-+ *
-+ * A 'sig' signal is delivered to the current process with 'info' siginfo,
-+ * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or
-+ * SIG_DFL.
-+ * Note that some signals reported by signal_generate tracepoint can be
-+ * lost, ignored or modified (by debugger) before hitting this tracepoint.
-+ * This means it can show which signals are actually delivered, but
-+ * matching generated signals and delivered signals may not be correct.
-+ */
-+TRACE_EVENT(signal_deliver,
-+
-+ TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
-+
-+ TP_ARGS(sig, info, ka),
-+
-+ TP_STRUCT__entry(
-+ __field( int, sig )
-+ __field( int, errno )
-+ __field( int, code )
-+ __field( unsigned long, sa_handler )
-+ __field( unsigned long, sa_flags )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(sig, sig)
-+ TP_STORE_SIGINFO(info)
-+ tp_assign(sa_handler, (unsigned long)ka->sa.sa_handler)
-+ tp_assign(sa_flags, ka->sa.sa_flags)
-+ ),
-+
-+ TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx",
-+ __entry->sig, __entry->errno, __entry->code,
-+ __entry->sa_handler, __entry->sa_flags)
-+)
-+
-+DECLARE_EVENT_CLASS(signal_queue_overflow,
-+
-+ TP_PROTO(int sig, int group, struct siginfo *info),
-+
-+ TP_ARGS(sig, group, info),
-+
-+ TP_STRUCT__entry(
-+ __field( int, sig )
-+ __field( int, group )
-+ __field( int, errno )
-+ __field( int, code )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(sig, sig)
-+ tp_assign(group, group)
-+ TP_STORE_SIGINFO(info)
-+ ),
-+
-+ TP_printk("sig=%d group=%d errno=%d code=%d",
-+ __entry->sig, __entry->group, __entry->errno, __entry->code)
-+)
-+
-+/**
-+ * signal_overflow_fail - called when the signal queue overflows
-+ * @sig: signal number
-+ * @group: signal to process group or not (bool)
-+ * @info: pointer to struct siginfo
-+ *
-+ * The kernel fails to generate the 'sig' signal with 'info' siginfo because
-+ * the siginfo queue overflows, and the signal is dropped.
-+ * 'group' is not 0 if the signal will be sent to a process group.
-+ * 'sig' is always one of RT signals.
-+ */
-+DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
-+
-+ TP_PROTO(int sig, int group, struct siginfo *info),
-+
-+ TP_ARGS(sig, group, info)
-+)
-+
-+/**
-+ * signal_lose_info - called when siginfo is lost
-+ * @sig: signal number
-+ * @group: signal to process group or not (bool)
-+ * @info: pointer to struct siginfo
-+ *
-+ * The kernel generates the 'sig' signal but loses 'info' siginfo because the
-+ * siginfo queue overflows.
-+ * 'group' is not 0 if the signal will be sent to a process group.
-+ * 'sig' is always one of non-RT signals.
-+ */
-+DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
-+
-+ TP_PROTO(int sig, int group, struct siginfo *info),
-+
-+ TP_ARGS(sig, group, info)
-+)
-+
-+#endif /* _TRACE_SIGNAL_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/lttng-module/timer.h
-@@ -0,0 +1,333 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM timer
-+
-+#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_TIMER_H
-+
-+#include <linux/tracepoint.h>
-+
-+#ifndef _TRACE_TIMER_DEF_
-+#define _TRACE_TIMER_DEF_
-+#include <linux/hrtimer.h>
-+#include <linux/timer.h>
-+#endif /* _TRACE_TIMER_DEF_ */
-+
-+DECLARE_EVENT_CLASS(timer_class,
-+
-+ TP_PROTO(struct timer_list *timer),
-+
-+ TP_ARGS(timer),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, timer )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(timer, timer)
-+ ),
-+
-+ TP_printk("timer=%p", __entry->timer)
-+)
-+
-+/**
-+ * timer_init - called when the timer is initialized
-+ * @timer: pointer to struct timer_list
-+ */
-+DEFINE_EVENT(timer_class, timer_init,
-+
-+ TP_PROTO(struct timer_list *timer),
-+
-+ TP_ARGS(timer)
-+)
-+
-+/**
-+ * timer_start - called when the timer is started
-+ * @timer: pointer to struct timer_list
-+ * @expires: the timer's expiry time
-+ */
-+TRACE_EVENT(timer_start,
-+
-+ TP_PROTO(struct timer_list *timer, unsigned long expires),
-+
-+ TP_ARGS(timer, expires),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, timer )
-+ __field( void *, function )
-+ __field( unsigned long, expires )
-+ __field( unsigned long, now )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(timer, timer)
-+ tp_assign(function, timer->function)
-+ tp_assign(expires, expires)
-+ tp_assign(now, jiffies)
-+ ),
-+
-+ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
-+ __entry->timer, __entry->function, __entry->expires,
-+ (long)__entry->expires - __entry->now)
-+)
-+
-+/**
-+ * timer_expire_entry - called immediately before the timer callback
-+ * @timer: pointer to struct timer_list
-+ *
-+ * Allows determining the timer latency.
-+ */
-+TRACE_EVENT(timer_expire_entry,
-+
-+ TP_PROTO(struct timer_list *timer),
-+
-+ TP_ARGS(timer),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, timer )
-+ __field( unsigned long, now )
-+ __field( void *, function)
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(timer, timer)
-+ tp_assign(now, jiffies)
-+ tp_assign(function, timer->function)
-+ ),
-+
-+ TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
-+)
-+
-+/**
-+ * timer_expire_exit - called immediately after the timer callback returns
-+ * @timer: pointer to struct timer_list
-+ *
-+ * When used in combination with the timer_expire_entry tracepoint we can
-+ * determine the runtime of the timer callback function.
-+ *
-+ * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
-+ * be invalid. We solely track the pointer.
-+ */
-+DEFINE_EVENT(timer_class, timer_expire_exit,
-+
-+ TP_PROTO(struct timer_list *timer),
-+
-+ TP_ARGS(timer)
-+)
-+
-+/**
-+ * timer_cancel - called when the timer is canceled
-+ * @timer: pointer to struct timer_list
-+ */
-+DEFINE_EVENT(timer_class, timer_cancel,
-+
-+ TP_PROTO(struct timer_list *timer),
-+
-+ TP_ARGS(timer)
-+)
-+
-+/**
-+ * hrtimer_init - called when the hrtimer is initialized
-+ * @timer: pointer to struct hrtimer
-+ * @clockid: the hrtimer's clock
-+ * @mode: the hrtimer's mode
-+ */
-+TRACE_EVENT(hrtimer_init,
-+
-+ TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
-+ enum hrtimer_mode mode),
-+
-+ TP_ARGS(hrtimer, clockid, mode),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, hrtimer )
-+ __field( clockid_t, clockid )
-+ __field( enum hrtimer_mode, mode )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(hrtimer, hrtimer)
-+ tp_assign(clockid, clockid)
-+ tp_assign(mode, mode)
-+ ),
-+
-+ TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
-+ __entry->clockid == CLOCK_REALTIME ?
-+ "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
-+ __entry->mode == HRTIMER_MODE_ABS ?
-+ "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
-+)
-+
-+/**
-+ * hrtimer_start - called when the hrtimer is started
-+ * @timer: pointer to struct hrtimer
-+ */
-+TRACE_EVENT(hrtimer_start,
-+
-+ TP_PROTO(struct hrtimer *hrtimer),
-+
-+ TP_ARGS(hrtimer),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, hrtimer )
-+ __field( void *, function )
-+ __field( s64, expires )
-+ __field( s64, softexpires )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(hrtimer, hrtimer)
-+ tp_assign(function, hrtimer->function)
-+ tp_assign(expires, hrtimer_get_expires(hrtimer).tv64)
-+ tp_assign(softexpires, hrtimer_get_softexpires(hrtimer).tv64)
-+ ),
-+
-+ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
-+ __entry->hrtimer, __entry->function,
-+ (unsigned long long)ktime_to_ns((ktime_t) {
-+ .tv64 = __entry->expires }),
-+ (unsigned long long)ktime_to_ns((ktime_t) {
-+ .tv64 = __entry->softexpires }))
-+)
-+
-+/**
-+ * hrtimer_expire_entry - called immediately before the hrtimer callback
-+ * @timer: pointer to struct hrtimer
-+ * @now: pointer to variable which contains the current time of the
-+ * timer's base.
-+ *
-+ * Allows determining the timer latency.
-+ */
-+TRACE_EVENT(hrtimer_expire_entry,
-+
-+ TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
-+
-+ TP_ARGS(hrtimer, now),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, hrtimer )
-+ __field( s64, now )
-+ __field( void *, function)
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(hrtimer, hrtimer)
-+ tp_assign(now, now->tv64)
-+ tp_assign(function, hrtimer->function)
-+ ),
-+
-+ TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
-+ (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
-+)
-+
-+DECLARE_EVENT_CLASS(hrtimer_class,
-+
-+ TP_PROTO(struct hrtimer *hrtimer),
-+
-+ TP_ARGS(hrtimer),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, hrtimer )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(hrtimer, hrtimer)
-+ ),
-+
-+ TP_printk("hrtimer=%p", __entry->hrtimer)
-+)
-+
-+/**
-+ * hrtimer_expire_exit - called immediately after the hrtimer callback returns
-+ * @timer: pointer to struct hrtimer
-+ *
-+ * When used in combination with the hrtimer_expire_entry tracepoint we can
-+ * determine the runtime of the callback function.
-+ */
-+DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
-+
-+ TP_PROTO(struct hrtimer *hrtimer),
-+
-+ TP_ARGS(hrtimer)
-+)
-+
-+/**
-+ * hrtimer_cancel - called when the hrtimer is canceled
-+ * @hrtimer: pointer to struct hrtimer
-+ */
-+DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
-+
-+ TP_PROTO(struct hrtimer *hrtimer),
-+
-+ TP_ARGS(hrtimer)
-+)
-+
-+/**
-+ * itimer_state - called when itimer is started or canceled
-+ * @which: name of the interval timer
-+ * @value: the itimer's value; the itimer is canceled if value->it_value is
-+ * zero, otherwise it is started
-+ * @expires: the itimer's expiry time
-+ */
-+TRACE_EVENT(itimer_state,
-+
-+ TP_PROTO(int which, const struct itimerval *const value,
-+ cputime_t expires),
-+
-+ TP_ARGS(which, value, expires),
-+
-+ TP_STRUCT__entry(
-+ __field( int, which )
-+ __field( cputime_t, expires )
-+ __field( long, value_sec )
-+ __field( long, value_usec )
-+ __field( long, interval_sec )
-+ __field( long, interval_usec )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(which, which)
-+ tp_assign(expires, expires)
-+ tp_assign(value_sec, value->it_value.tv_sec)
-+ tp_assign(value_usec, value->it_value.tv_usec)
-+ tp_assign(interval_sec, value->it_interval.tv_sec)
-+ tp_assign(interval_usec, value->it_interval.tv_usec)
-+ ),
-+
-+ TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
-+ __entry->which, (unsigned long long)__entry->expires,
-+ __entry->value_sec, __entry->value_usec,
-+ __entry->interval_sec, __entry->interval_usec)
-+)
-+
-+/**
-+ * itimer_expire - called when itimer expires
-+ * @which: type of the interval timer
-+ * @pid: pid of the process which owns the timer
-+ * @now: current time, used to calculate the latency of itimer
-+ */
-+TRACE_EVENT(itimer_expire,
-+
-+ TP_PROTO(int which, struct pid *pid, cputime_t now),
-+
-+ TP_ARGS(which, pid, now),
-+
-+ TP_STRUCT__entry(
-+ __field( int , which )
-+ __field( pid_t, pid )
-+ __field( cputime_t, now )
-+ ),
-+
-+ TP_fast_assign(
-+ tp_assign(which, which)
-+ tp_assign(now, now)
-+ tp_assign(pid, pid_nr(pid))
-+ ),
-+
-+ TP_printk("which=%d pid=%d now=%llu", __entry->which,
-+ (int) __entry->pid, (unsigned long long)__entry->now)
-+)
-+
-+#endif /* _TRACE_TIMER_H */
-+
-+/* This part must be outside protection */
-+#include "../../../probes/define_trace.h"
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/mainline/signal.h
-@@ -0,0 +1,166 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM signal
-+
-+#if !defined(_TRACE_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_SIGNAL_H
-+
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/tracepoint.h>
-+
-+#define TP_STORE_SIGINFO(__entry, info) \
-+ do { \
-+ if (info == SEND_SIG_NOINFO || \
-+ info == SEND_SIG_FORCED) { \
-+ __entry->errno = 0; \
-+ __entry->code = SI_USER; \
-+ } else if (info == SEND_SIG_PRIV) { \
-+ __entry->errno = 0; \
-+ __entry->code = SI_KERNEL; \
-+ } else { \
-+ __entry->errno = info->si_errno; \
-+ __entry->code = info->si_code; \
-+ } \
-+ } while (0)
-+
-+/**
-+ * signal_generate - called when a signal is generated
-+ * @sig: signal number
-+ * @info: pointer to struct siginfo
-+ * @task: pointer to struct task_struct
-+ *
-+ * The current process sends a 'sig' signal to the 'task' process with
-+ * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
-+ * 'info' is not a pointer and you can't access its fields. Instead,
-+ * SEND_SIG_NOINFO means that si_code is SI_USER, and SEND_SIG_PRIV
-+ * means that si_code is SI_KERNEL.
-+ */
-+TRACE_EVENT(signal_generate,
-+
-+ TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
-+
-+ TP_ARGS(sig, info, task),
-+
-+ TP_STRUCT__entry(
-+ __field( int, sig )
-+ __field( int, errno )
-+ __field( int, code )
-+ __array( char, comm, TASK_COMM_LEN )
-+ __field( pid_t, pid )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->sig = sig;
-+ TP_STORE_SIGINFO(__entry, info);
-+ memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
-+ __entry->pid = task->pid;
-+ ),
-+
-+ TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
-+ __entry->sig, __entry->errno, __entry->code,
-+ __entry->comm, __entry->pid)
-+);
-+
-+/**
-+ * signal_deliver - called when a signal is delivered
-+ * @sig: signal number
-+ * @info: pointer to struct siginfo
-+ * @ka: pointer to struct k_sigaction
-+ *
-+ * A 'sig' signal is delivered to the current process with 'info' siginfo,
-+ * and it will be handled by 'ka'. ka->sa.sa_handler can be SIG_IGN or
-+ * SIG_DFL.
-+ * Note that some signals reported by signal_generate tracepoint can be
-+ * lost, ignored or modified (by debugger) before hitting this tracepoint.
-+ * This means it can show which signals are actually delivered, but
-+ * matching generated signals and delivered signals may not be correct.
-+ */
-+TRACE_EVENT(signal_deliver,
-+
-+ TP_PROTO(int sig, struct siginfo *info, struct k_sigaction *ka),
-+
-+ TP_ARGS(sig, info, ka),
-+
-+ TP_STRUCT__entry(
-+ __field( int, sig )
-+ __field( int, errno )
-+ __field( int, code )
-+ __field( unsigned long, sa_handler )
-+ __field( unsigned long, sa_flags )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->sig = sig;
-+ TP_STORE_SIGINFO(__entry, info);
-+ __entry->sa_handler = (unsigned long)ka->sa.sa_handler;
-+ __entry->sa_flags = ka->sa.sa_flags;
-+ ),
-+
-+ TP_printk("sig=%d errno=%d code=%d sa_handler=%lx sa_flags=%lx",
-+ __entry->sig, __entry->errno, __entry->code,
-+ __entry->sa_handler, __entry->sa_flags)
-+);
-+
-+DECLARE_EVENT_CLASS(signal_queue_overflow,
-+
-+ TP_PROTO(int sig, int group, struct siginfo *info),
-+
-+ TP_ARGS(sig, group, info),
-+
-+ TP_STRUCT__entry(
-+ __field( int, sig )
-+ __field( int, group )
-+ __field( int, errno )
-+ __field( int, code )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->sig = sig;
-+ __entry->group = group;
-+ TP_STORE_SIGINFO(__entry, info);
-+ ),
-+
-+ TP_printk("sig=%d group=%d errno=%d code=%d",
-+ __entry->sig, __entry->group, __entry->errno, __entry->code)
-+);
-+
-+/**
-+ * signal_overflow_fail - called when the signal queue overflows
-+ * @sig: signal number
-+ * @group: signal to process group or not (bool)
-+ * @info: pointer to struct siginfo
-+ *
-+ * The kernel fails to generate the 'sig' signal with 'info' siginfo because
-+ * the siginfo queue overflows, and the signal is dropped.
-+ * 'group' is not 0 if the signal will be sent to a process group.
-+ * 'sig' is always one of RT signals.
-+ */
-+DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
-+
-+ TP_PROTO(int sig, int group, struct siginfo *info),
-+
-+ TP_ARGS(sig, group, info)
-+);
-+
-+/**
-+ * signal_lose_info - called when siginfo is lost
-+ * @sig: signal number
-+ * @group: signal to process group or not (bool)
-+ * @info: pointer to struct siginfo
-+ *
-+ * The kernel generates the 'sig' signal but loses 'info' siginfo because the
-+ * siginfo queue overflows.
-+ * 'group' is not 0 if the signal will be sent to a process group.
-+ * 'sig' is always one of non-RT signals.
-+ */
-+DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
-+
-+ TP_PROTO(int sig, int group, struct siginfo *info),
-+
-+ TP_ARGS(sig, group, info)
-+);
-+
-+#endif /* _TRACE_SIGNAL_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
---- /dev/null
-+++ b/drivers/staging/lttng/instrumentation/events/mainline/timer.h
-@@ -0,0 +1,329 @@
-+#undef TRACE_SYSTEM
-+#define TRACE_SYSTEM timer
-+
-+#if !defined(_TRACE_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
-+#define _TRACE_TIMER_H
-+
-+#include <linux/tracepoint.h>
-+#include <linux/hrtimer.h>
-+#include <linux/timer.h>
-+
-+DECLARE_EVENT_CLASS(timer_class,
-+
-+ TP_PROTO(struct timer_list *timer),
-+
-+ TP_ARGS(timer),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, timer )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->timer = timer;
-+ ),
-+
-+ TP_printk("timer=%p", __entry->timer)
-+);
-+
-+/**
-+ * timer_init - called when the timer is initialized
-+ * @timer: pointer to struct timer_list
-+ */
-+DEFINE_EVENT(timer_class, timer_init,
-+
-+ TP_PROTO(struct timer_list *timer),
-+
-+ TP_ARGS(timer)
-+);
-+
-+/**
-+ * timer_start - called when the timer is started
-+ * @timer: pointer to struct timer_list
-+ * @expires: the timer's expiry time
-+ */
-+TRACE_EVENT(timer_start,
-+
-+ TP_PROTO(struct timer_list *timer, unsigned long expires),
-+
-+ TP_ARGS(timer, expires),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, timer )
-+ __field( void *, function )
-+ __field( unsigned long, expires )
-+ __field( unsigned long, now )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->timer = timer;
-+ __entry->function = timer->function;
-+ __entry->expires = expires;
-+ __entry->now = jiffies;
-+ ),
-+
-+ TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
-+ __entry->timer, __entry->function, __entry->expires,
-+ (long)__entry->expires - __entry->now)
-+);
-+
-+/**
-+ * timer_expire_entry - called immediately before the timer callback
-+ * @timer: pointer to struct timer_list
-+ *
-+ * Allows determining the timer latency.
-+ */
-+TRACE_EVENT(timer_expire_entry,
-+
-+ TP_PROTO(struct timer_list *timer),
-+
-+ TP_ARGS(timer),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, timer )
-+ __field( unsigned long, now )
-+ __field( void *, function)
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->timer = timer;
-+ __entry->now = jiffies;
-+ __entry->function = timer->function;
-+ ),
-+
-+ TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
-+);
-+
-+/**
-+ * timer_expire_exit - called immediately after the timer callback returns
-+ * @timer: pointer to struct timer_list
-+ *
-+ * When used in combination with the timer_expire_entry tracepoint we can
-+ * determine the runtime of the timer callback function.
-+ *
-+ * NOTE: Do NOT dereference timer in TP_fast_assign. The pointer might
-+ * be invalid. We solely track the pointer.
-+ */
-+DEFINE_EVENT(timer_class, timer_expire_exit,
-+
-+ TP_PROTO(struct timer_list *timer),
-+
-+ TP_ARGS(timer)
-+);
-+
-+/**
-+ * timer_cancel - called when the timer is canceled
-+ * @timer: pointer to struct timer_list
-+ */
-+DEFINE_EVENT(timer_class, timer_cancel,
-+
-+ TP_PROTO(struct timer_list *timer),
-+
-+ TP_ARGS(timer)
-+);
-+
-+/**
-+ * hrtimer_init - called when the hrtimer is initialized
-+ * @timer: pointer to struct hrtimer
-+ * @clockid: the hrtimer's clock
-+ * @mode: the hrtimer's mode
-+ */
-+TRACE_EVENT(hrtimer_init,
-+
-+ TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
-+ enum hrtimer_mode mode),
-+
-+ TP_ARGS(hrtimer, clockid, mode),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, hrtimer )
-+ __field( clockid_t, clockid )
-+ __field( enum hrtimer_mode, mode )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->hrtimer = hrtimer;
-+ __entry->clockid = clockid;
-+ __entry->mode = mode;
-+ ),
-+
-+ TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
-+ __entry->clockid == CLOCK_REALTIME ?
-+ "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
-+ __entry->mode == HRTIMER_MODE_ABS ?
-+ "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
-+);
-+
-+/**
-+ * hrtimer_start - called when the hrtimer is started
-+ * @timer: pointer to struct hrtimer
-+ */
-+TRACE_EVENT(hrtimer_start,
-+
-+ TP_PROTO(struct hrtimer *hrtimer),
-+
-+ TP_ARGS(hrtimer),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, hrtimer )
-+ __field( void *, function )
-+ __field( s64, expires )
-+ __field( s64, softexpires )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->hrtimer = hrtimer;
-+ __entry->function = hrtimer->function;
-+ __entry->expires = hrtimer_get_expires(hrtimer).tv64;
-+ __entry->softexpires = hrtimer_get_softexpires(hrtimer).tv64;
-+ ),
-+
-+ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
-+ __entry->hrtimer, __entry->function,
-+ (unsigned long long)ktime_to_ns((ktime_t) {
-+ .tv64 = __entry->expires }),
-+ (unsigned long long)ktime_to_ns((ktime_t) {
-+ .tv64 = __entry->softexpires }))
-+);
-+
-+/**
-+ * hrtimer_expire_entry - called immediately before the hrtimer callback
-+ * @timer: pointer to struct hrtimer
-+ * @now: pointer to variable which contains the current time of the
-+ * timer's base.
-+ *
-+ * Allows determining the timer latency.
-+ */
-+TRACE_EVENT(hrtimer_expire_entry,
-+
-+ TP_PROTO(struct hrtimer *hrtimer, ktime_t *now),
-+
-+ TP_ARGS(hrtimer, now),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, hrtimer )
-+ __field( s64, now )
-+ __field( void *, function)
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->hrtimer = hrtimer;
-+ __entry->now = now->tv64;
-+ __entry->function = hrtimer->function;
-+ ),
-+
-+ TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
-+ (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
-+ );
-+
-+DECLARE_EVENT_CLASS(hrtimer_class,
-+
-+ TP_PROTO(struct hrtimer *hrtimer),
-+
-+ TP_ARGS(hrtimer),
-+
-+ TP_STRUCT__entry(
-+ __field( void *, hrtimer )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->hrtimer = hrtimer;
-+ ),
-+
-+ TP_printk("hrtimer=%p", __entry->hrtimer)
-+);
-+
-+/**
-+ * hrtimer_expire_exit - called immediately after the hrtimer callback returns
-+ * @timer: pointer to struct hrtimer
-+ *
-+ * When used in combination with the hrtimer_expire_entry tracepoint we can
-+ * determine the runtime of the callback function.
-+ */
-+DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
-+
-+ TP_PROTO(struct hrtimer *hrtimer),
-+
-+ TP_ARGS(hrtimer)
-+);
-+
-+/**
-+ * hrtimer_cancel - called when the hrtimer is canceled
-+ * @hrtimer: pointer to struct hrtimer
-+ */
-+DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
-+
-+ TP_PROTO(struct hrtimer *hrtimer),
-+
-+ TP_ARGS(hrtimer)
-+);
-+
-+/**
-+ * itimer_state - called when itimer is started or canceled
-+ * @which: name of the interval timer
-+ * @value: the itimer's value; the itimer is canceled if value->it_value is
-+ * zero, otherwise it is started
-+ * @expires: the itimer's expiry time
-+ */
-+TRACE_EVENT(itimer_state,
-+
-+ TP_PROTO(int which, const struct itimerval *const value,
-+ cputime_t expires),
-+
-+ TP_ARGS(which, value, expires),
-+
-+ TP_STRUCT__entry(
-+ __field( int, which )
-+ __field( cputime_t, expires )
-+ __field( long, value_sec )
-+ __field( long, value_usec )
-+ __field( long, interval_sec )
-+ __field( long, interval_usec )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->which = which;
-+ __entry->expires = expires;
-+ __entry->value_sec = value->it_value.tv_sec;
-+ __entry->value_usec = value->it_value.tv_usec;
-+ __entry->interval_sec = value->it_interval.tv_sec;
-+ __entry->interval_usec = value->it_interval.tv_usec;
-+ ),
-+
-+ TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
-+ __entry->which, (unsigned long long)__entry->expires,
-+ __entry->value_sec, __entry->value_usec,
-+ __entry->interval_sec, __entry->interval_usec)
-+);
-+
-+/**
-+ * itimer_expire - called when itimer expires
-+ * @which: type of the interval timer
-+ * @pid: pid of the process which owns the timer
-+ * @now: current time, used to calculate the latency of itimer
-+ */
-+TRACE_EVENT(itimer_expire,
-+
-+ TP_PROTO(int which, struct pid *pid, cputime_t now),
-+
-+ TP_ARGS(which, pid, now),
-+
-+ TP_STRUCT__entry(
-+ __field( int , which )
-+ __field( pid_t, pid )
-+ __field( cputime_t, now )
-+ ),
-+
-+ TP_fast_assign(
-+ __entry->which = which;
-+ __entry->now = now;
-+ __entry->pid = pid_nr(pid);
-+ ),
-+
-+ TP_printk("which=%d pid=%d now=%llu", __entry->which,
-+ (int) __entry->pid, (unsigned long long)__entry->now)
-+);
-+
-+#endif /* _TRACE_TIMER_H */
-+
-+/* This part must be outside protection */
-+#include <trace/define_trace.h>
---- a/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/syscalls_pointers_override.h
-@@ -1,3 +1,56 @@
-+#define OVERRIDE_32_sys_execve
-+#define OVERRIDE_64_sys_execve
-+
-+#ifndef CREATE_SYSCALL_TABLE
-+
-+SC_TRACE_EVENT(sys_execve,
-+ TP_PROTO(const char *filename, char *const *argv, char *const *envp),
-+ TP_ARGS(filename, argv, envp),
-+ TP_STRUCT__entry(__string_from_user(filename, filename)
-+ __field_hex(char *const *, argv)
-+ __field_hex(char *const *, envp)),
-+ TP_fast_assign(tp_copy_string_from_user(filename, filename)
-+ tp_assign(argv, argv)
-+ tp_assign(envp, envp)),
-+ TP_printk()
-+)
-+
-+SC_TRACE_EVENT(sys_clone,
-+ TP_PROTO(unsigned long clone_flags, unsigned long newsp,
-+ void __user *parent_tid,
-+ void __user *child_tid),
-+ TP_ARGS(clone_flags, newsp, parent_tid, child_tid),
-+ TP_STRUCT__entry(
-+ __field_hex(unsigned long, clone_flags)
-+ __field_hex(unsigned long, newsp)
-+ __field_hex(void *, parent_tid)
-+ __field_hex(void *, child_tid)),
-+ TP_fast_assign(
-+ tp_assign(clone_flags, clone_flags)
-+ tp_assign(newsp, newsp)
-+ tp_assign(parent_tid, parent_tid)
-+ tp_assign(child_tid, child_tid)),
-+ TP_printk()
-+)
-+
-+/* present in 32, missing in 64 due to old kernel headers */
-+#define OVERRIDE_32_sys_getcpu
-+#define OVERRIDE_64_sys_getcpu
-+SC_TRACE_EVENT(sys_getcpu,
-+ TP_PROTO(unsigned __user *cpup, unsigned __user *nodep, void *tcache),
-+ TP_ARGS(cpup, nodep, tcache),
-+ TP_STRUCT__entry(
-+ __field_hex(unsigned *, cpup)
-+ __field_hex(unsigned *, nodep)
-+ __field_hex(void *, tcache)),
-+ TP_fast_assign(
-+ tp_assign(cpup, cpup)
-+ tp_assign(nodep, nodep)
-+ tp_assign(tcache, tcache)),
-+ TP_printk()
-+)
-+
-+#endif /* CREATE_SYSCALL_TABLE */
- /*
- * This is a place-holder for override defines for system calls with
- * pointers (all architectures).
---- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-32-syscalls-3.1.0-rc6_pointers_override.h
-@@ -1,17 +1,33 @@
--#ifndef CONFIG_UID16
-
--#define OVERRIDE_32_sys_getgroups16
--#define OVERRIDE_32_sys_setgroups16
--#define OVERRIDE_32_sys_lchown16
--#define OVERRIDE_32_sys_getresuid16
--#define OVERRIDE_32_sys_getresgid16
--#define OVERRIDE_32_sys_chown16
--
--#define OVERRIDE_TABLE_32_sys_getgroups16
--#define OVERRIDE_TABLE_32_sys_setgroups16
--#define OVERRIDE_TABLE_32_sys_lchown16
--#define OVERRIDE_TABLE_32_sys_getresuid16
--#define OVERRIDE_TABLE_32_sys_getresgid16
--#define OVERRIDE_TABLE_32_sys_chown16
-+#ifndef CREATE_SYSCALL_TABLE
-+
-+# ifndef CONFIG_UID16
-+# define OVERRIDE_32_sys_getgroups16
-+# define OVERRIDE_32_sys_setgroups16
-+# define OVERRIDE_32_sys_lchown16
-+# define OVERRIDE_32_sys_getresuid16
-+# define OVERRIDE_32_sys_getresgid16
-+# define OVERRIDE_32_sys_chown16
-+# endif
-+
-+#else /* CREATE_SYSCALL_TABLE */
-+
-+# ifndef CONFIG_UID16
-+# define OVERRIDE_TABLE_32_sys_getgroups16
-+# define OVERRIDE_TABLE_32_sys_setgroups16
-+# define OVERRIDE_TABLE_32_sys_lchown16
-+# define OVERRIDE_TABLE_32_sys_getresuid16
-+# define OVERRIDE_TABLE_32_sys_getresgid16
-+# define OVERRIDE_TABLE_32_sys_chown16
-+# endif
-+
-+#define OVERRIDE_TABLE_32_sys_execve
-+TRACE_SYSCALL_TABLE(sys_execve, sys_execve, 11, 3)
-+#define OVERRIDE_TABLE_32_sys_clone
-+TRACE_SYSCALL_TABLE(sys_clone, sys_clone, 120, 5)
-+#define OVERRIDE_TABLE_32_sys_getcpu
-+TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 318, 3)
-+
-+#endif /* CREATE_SYSCALL_TABLE */
-+
-
--#endif
---- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_integers_override.h
-@@ -1,3 +1,6 @@
- /*
- * this is a place-holder for x86_64 integer syscall definition override.
- */
-+/*
-+ * this is a place-holder for x86_64 integer syscall definition override.
-+ */
---- a/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h
-+++ b/drivers/staging/lttng/instrumentation/syscalls/headers/x86-64-syscalls-3.0.4_pointers_override.h
-@@ -2,4 +2,11 @@
-
- #else /* CREATE_SYSCALL_TABLE */
-
-+#define OVERRIDE_TABLE_64_sys_clone
-+TRACE_SYSCALL_TABLE(sys_clone, sys_clone, 56, 5)
-+#define OVERRIDE_TABLE_64_sys_execve
-+TRACE_SYSCALL_TABLE(sys_execve, sys_execve, 59, 3)
-+#define OVERRIDE_TABLE_64_sys_getcpu
-+TRACE_SYSCALL_TABLE(sys_getcpu, sys_getcpu, 309, 3)
-+
- #endif /* CREATE_SYSCALL_TABLE */
---- a/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c
-+++ b/drivers/staging/lttng/instrumentation/syscalls/lttng-syscalls-extractor/lttng-syscalls-extractor.c
-@@ -1,10 +1,24 @@
- /*
-+ * lttng-syscalls-extractor.c
-+ *
-+ * Dump syscall metadata to console.
-+ *
- * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
- *
-- * Dump syscall metadata to console.
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
- *
-- * GPLv2 license.
-+ * You should have received a copy of the GNU General Public License along
-+ * with this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
- #include <linux/module.h>
---- a/drivers/staging/lttng/lib/Makefile
-+++ b/drivers/staging/lttng/lib/Makefile
-@@ -1,6 +1,6 @@
--obj-m += lib-ring-buffer.o
-+obj-m += lttng-lib-ring-buffer.o
-
--lib-ring-buffer-objs := \
-+lttng-lib-ring-buffer-objs := \
- ringbuffer/ring_buffer_backend.o \
- ringbuffer/ring_buffer_frontend.o \
- ringbuffer/ring_buffer_iterator.o \
---- a/drivers/staging/lttng/lib/align.h
-+++ b/drivers/staging/lttng/lib/align.h
-@@ -4,9 +4,21 @@
- /*
- * lib/align.h
- *
-- * (C) Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #ifdef __KERNEL__
---- a/drivers/staging/lttng/lib/bitfield.h
-+++ b/drivers/staging/lttng/lib/bitfield.h
-@@ -19,7 +19,7 @@
- * all copies or substantial portions of the Software.
- */
-
--#include "../ltt-endian.h"
-+#include "../lttng-endian.h"
-
- #ifndef CHAR_BIT
- #define CHAR_BIT 8
---- a/drivers/staging/lttng/lib/bug.h
-+++ b/drivers/staging/lttng/lib/bug.h
-@@ -4,9 +4,21 @@
- /*
- * lib/bug.h
- *
-- * (C) Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- /**
---- a/drivers/staging/lttng/lib/ringbuffer/api.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/api.h
-@@ -1,14 +1,26 @@
--#ifndef _LINUX_RING_BUFFER_API_H
--#define _LINUX_RING_BUFFER_API_H
-+#ifndef _LIB_RING_BUFFER_API_H
-+#define _LIB_RING_BUFFER_API_H
-
- /*
-- * linux/ringbuffer/api.h
-- *
-- * Copyright (C) 2010 - Mathieu Desnoyers "mathieu.desnoyers@efficios.com"
-+ * lib/ringbuffer/api.h
- *
- * Ring Buffer API.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include "../../wrapper/ringbuffer/backend.h"
-@@ -22,4 +34,4 @@
- */
- #include "../../wrapper/ringbuffer/frontend_api.h"
-
--#endif /* _LINUX_RING_BUFFER_API_H */
-+#endif /* _LIB_RING_BUFFER_API_H */
---- a/drivers/staging/lttng/lib/ringbuffer/backend.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/backend.h
-@@ -1,14 +1,26 @@
--#ifndef _LINUX_RING_BUFFER_BACKEND_H
--#define _LINUX_RING_BUFFER_BACKEND_H
-+#ifndef _LIB_RING_BUFFER_BACKEND_H
-+#define _LIB_RING_BUFFER_BACKEND_H
-
- /*
-- * linux/ringbuffer/backend.h
-- *
-- * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lib/ringbuffer/backend.h
- *
- * Ring buffer backend (API).
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
- * the reader in flight recorder mode.
-@@ -247,4 +259,4 @@ ssize_t lib_ring_buffer_file_splice_read
- size_t len, unsigned int flags);
- loff_t lib_ring_buffer_no_llseek(struct file *file, loff_t offset, int origin);
-
--#endif /* _LINUX_RING_BUFFER_BACKEND_H */
-+#endif /* _LIB_RING_BUFFER_BACKEND_H */
---- a/drivers/staging/lttng/lib/ringbuffer/backend_internal.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/backend_internal.h
-@@ -1,14 +1,26 @@
--#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
--#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
-+#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
-+#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H
-
- /*
-- * linux/ringbuffer/backend_internal.h
-- *
-- * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lib/ringbuffer/backend_internal.h
- *
- * Ring buffer backend (internal helpers).
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include "../../wrapper/ringbuffer/config.h"
-@@ -446,4 +458,4 @@ void lib_ring_buffer_do_memset(char *des
- dest[i] = c;
- }
-
--#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */
-+#endif /* _LIB_RING_BUFFER_BACKEND_INTERNAL_H */
---- a/drivers/staging/lttng/lib/ringbuffer/backend_types.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/backend_types.h
-@@ -1,14 +1,26 @@
--#ifndef _LINUX_RING_BUFFER_BACKEND_TYPES_H
--#define _LINUX_RING_BUFFER_BACKEND_TYPES_H
-+#ifndef _LIB_RING_BUFFER_BACKEND_TYPES_H
-+#define _LIB_RING_BUFFER_BACKEND_TYPES_H
-
- /*
-- * linux/ringbuffer/backend_types.h
-- *
-- * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lib/ringbuffer/backend_types.h
- *
- * Ring buffer backend (types).
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/cpumask.h>
-@@ -72,9 +84,14 @@ struct channel_backend {
- u64 start_tsc; /* Channel creation TSC value */
- void *priv; /* Client-specific information */
- struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
-- const struct lib_ring_buffer_config *config; /* Ring buffer configuration */
-+ /*
-+ * We need to copy config because the module containing the
-+ * source config can vanish before the last reference to this
-+ * channel's streams is released.
-+ */
-+ struct lib_ring_buffer_config config; /* Ring buffer configuration */
- cpumask_var_t cpumask; /* Allocated per-cpu buffers cpumask */
- char name[NAME_MAX]; /* Channel name */
- };
-
--#endif /* _LINUX_RING_BUFFER_BACKEND_TYPES_H */
-+#endif /* _LIB_RING_BUFFER_BACKEND_TYPES_H */
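
The new comment above explains why struct lib_ring_buffer_config is now embedded by value rather than referenced through a pointer into the client module. The sketch below only illustrates that lifetime issue; the type and function names are invented and nothing here comes from the patch.

    /* Invented names; illustrates the lifetime issue described above. */
    struct example_backend_with_pointer {
            const struct lib_ring_buffer_config *config;    /* dangles once the
                                                             * client module is
                                                             * unloaded */
    };

    struct example_backend_with_copy {
            struct lib_ring_buffer_config config;           /* owned by the channel,
                                                             * valid until teardown */
    };

    static void example_backend_create(struct example_backend_with_copy *b,
                                       const struct lib_ring_buffer_config *client_config)
    {
            b->config = *client_config;     /* copy, never alias module-owned data */
    }
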
---- a/drivers/staging/lttng/lib/ringbuffer/config.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/config.h
-@@ -1,15 +1,27 @@
--#ifndef _LINUX_RING_BUFFER_CONFIG_H
--#define _LINUX_RING_BUFFER_CONFIG_H
-+#ifndef _LIB_RING_BUFFER_CONFIG_H
-+#define _LIB_RING_BUFFER_CONFIG_H
-
- /*
-- * linux/ringbuffer/config.h
-- *
-- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lib/ringbuffer/config.h
- *
- * Ring buffer configuration header. Note: after declaring the standard inline
- * functions, clients should also include linux/ringbuffer/api.h.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/types.h>
-@@ -295,4 +307,4 @@ int lib_ring_buffer_check_config(const s
-
- #include "../../wrapper/ringbuffer/vatomic.h"
-
--#endif /* _LINUX_RING_BUFFER_CONFIG_H */
-+#endif /* _LIB_RING_BUFFER_CONFIG_H */
---- a/drivers/staging/lttng/lib/ringbuffer/frontend.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/frontend.h
-@@ -1,19 +1,31 @@
--#ifndef _LINUX_RING_BUFFER_FRONTEND_H
--#define _LINUX_RING_BUFFER_FRONTEND_H
-+#ifndef _LIB_RING_BUFFER_FRONTEND_H
-+#define _LIB_RING_BUFFER_FRONTEND_H
-
- /*
-- * linux/ringbuffer/frontend.h
-- *
-- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lib/ringbuffer/frontend.h
- *
- * Ring Buffer Library Synchronization Header (API).
- *
-+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
- */
-
- #include <linux/pipe_fs_i.h>
-@@ -225,4 +237,4 @@ unsigned long lib_ring_buffer_get_record
- return v_read(config, &buf->backend.records_read);
- }
-
--#endif /* _LINUX_RING_BUFFER_FRONTEND_H */
-+#endif /* _LIB_RING_BUFFER_FRONTEND_H */
---- a/drivers/staging/lttng/lib/ringbuffer/frontend_api.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/frontend_api.h
-@@ -1,20 +1,32 @@
--#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
--#define _LINUX_RING_BUFFER_FRONTEND_API_H
-+#ifndef _LIB_RING_BUFFER_FRONTEND_API_H
-+#define _LIB_RING_BUFFER_FRONTEND_API_H
-
- /*
-- * linux/ringbuffer/frontend_api.h
-- *
-- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lib/ringbuffer/frontend_api.h
- *
- * Ring Buffer Library Synchronization Header (buffer write API).
- *
-+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
- */
-
- #include "../../wrapper/ringbuffer/frontend.h"
-@@ -355,4 +367,4 @@ void lib_ring_buffer_record_enable(const
- atomic_dec(&buf->record_disabled);
- }
-
--#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
-+#endif /* _LIB_RING_BUFFER_FRONTEND_API_H */
---- a/drivers/staging/lttng/lib/ringbuffer/frontend_internal.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/frontend_internal.h
-@@ -1,19 +1,31 @@
--#ifndef _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
--#define _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
-+#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
-+#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
-
- /*
- * linux/ringbuffer/frontend_internal.h
- *
-- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * Ring Buffer Library Synchronization Header (internal helpers).
- *
-+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
- */
-
- #include "../../wrapper/ringbuffer/config.h"
-@@ -421,4 +433,4 @@ extern void lib_ring_buffer_free(struct
- /* Keep track of trap nesting inside ring buffer code */
- DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
-
--#endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */
-+#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */
---- a/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/frontend_types.h
-@@ -1,19 +1,31 @@
--#ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
--#define _LINUX_RING_BUFFER_FRONTEND_TYPES_H
-+#ifndef _LIB_RING_BUFFER_FRONTEND_TYPES_H
-+#define _LIB_RING_BUFFER_FRONTEND_TYPES_H
-
- /*
-- * linux/ringbuffer/frontend_types.h
-- *
-- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lib/ringbuffer/frontend_types.h
- *
- * Ring Buffer Library Synchronization Header (types).
- *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
- */
-
- #include <linux/kref.h>
-@@ -138,9 +150,9 @@ struct lib_ring_buffer {
- unsigned long get_subbuf_consumed; /* Read-side consumed */
- unsigned long prod_snapshot; /* Producer count snapshot */
- unsigned long cons_snapshot; /* Consumer count snapshot */
-- uint get_subbuf:1; /* Sub-buffer being held by reader */
-- uint switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
-- uint read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
-+ uint get_subbuf:1, /* Sub-buffer being held by reader */
-+ switch_timer_enabled:1, /* Protected by ring_buffer_nohz_lock */
-+ read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
- };
-
- static inline
-@@ -173,4 +185,4 @@ void *channel_get_private(struct channel
- _____ret; \
- })
-
--#endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */
-+#endif /* _LIB_RING_BUFFER_FRONTEND_TYPES_H */
---- a/drivers/staging/lttng/lib/ringbuffer/iterator.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/iterator.h
-@@ -1,17 +1,29 @@
--#ifndef _LINUX_RING_BUFFER_ITERATOR_H
--#define _LINUX_RING_BUFFER_ITERATOR_H
-+#ifndef _LIB_RING_BUFFER_ITERATOR_H
-+#define _LIB_RING_BUFFER_ITERATOR_H
-
- /*
-- * linux/ringbuffer/iterator.h
-- *
-- * (C) Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lib/ringbuffer/iterator.h
- *
- * Ring buffer and channel iterators.
- *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * Dual LGPL v2.1/GPL v2 license.
- */
-
- #include "../../wrapper/ringbuffer/backend.h"
-@@ -67,4 +79,4 @@ void channel_iterator_free(struct channe
- void channel_iterator_reset(struct channel *chan);
- void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf);
-
--#endif /* _LINUX_RING_BUFFER_ITERATOR_H */
-+#endif /* _LIB_RING_BUFFER_ITERATOR_H */
---- a/drivers/staging/lttng/lib/ringbuffer/nohz.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/nohz.h
-@@ -1,12 +1,24 @@
--#ifndef _LINUX_RING_BUFFER_NOHZ_H
--#define _LINUX_RING_BUFFER_NOHZ_H
-+#ifndef _LIB_RING_BUFFER_NOHZ_H
-+#define _LIB_RING_BUFFER_NOHZ_H
-
- /*
-- * ringbuffer/nohz.h
-+ * lib/ringbuffer/nohz.h
- *
-- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #ifdef CONFIG_LIB_RING_BUFFER
-@@ -27,4 +39,4 @@ static inline void lib_ring_buffer_tick_
- }
- #endif
-
--#endif /* _LINUX_RING_BUFFER_NOHZ_H */
-+#endif /* _LIB_RING_BUFFER_NOHZ_H */
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_backend.c
-@@ -1,9 +1,21 @@
- /*
- * ring_buffer_backend.c
- *
-- * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/stddef.h>
-@@ -155,7 +167,7 @@ pages_error:
- int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
- struct channel_backend *chanb, int cpu)
- {
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
-
- bufb->chan = container_of(chanb, struct channel, backend);
- bufb->cpu = cpu;
-@@ -187,7 +199,7 @@ void lib_ring_buffer_backend_free(struct
- void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
- {
- struct channel_backend *chanb = &bufb->chan->backend;
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- unsigned long num_subbuf_alloc;
- unsigned int i;
-
-@@ -221,7 +233,7 @@ void lib_ring_buffer_backend_reset(struc
- void channel_backend_reset(struct channel_backend *chanb)
- {
- struct channel *chan = container_of(chanb, struct channel, backend);
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
-
- /*
- * Don't reset buf_size, subbuf_size, subbuf_size_order,
-@@ -248,7 +260,7 @@ int __cpuinit lib_ring_buffer_cpu_hp_cal
- unsigned int cpu = (unsigned long)hcpu;
- struct channel_backend *chanb = container_of(nb, struct channel_backend,
- cpu_hp_notifier);
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- struct lib_ring_buffer *buf;
- int ret;
-
-@@ -307,18 +319,18 @@ int channel_backend_init(struct channel_
- if (!name)
- return -EPERM;
-
-- if (!(subbuf_size && num_subbuf))
-- return -EPERM;
--
- /* Check that the subbuffer size is larger than a page. */
- if (subbuf_size < PAGE_SIZE)
- return -EINVAL;
-
- /*
-- * Make sure the number of subbuffers and subbuffer size are power of 2.
-+ * Make sure the number of subbuffers and subbuffer size are
-+ * power of 2 and nonzero.
- */
-- CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
-- CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);
-+ if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
-+ return -EINVAL;
-+ if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
-+ return -EINVAL;
-
- ret = subbuffer_id_check_index(config, num_subbuf);
- if (ret)
-@@ -334,7 +346,7 @@ int channel_backend_init(struct channel_
- (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
- chanb->num_subbuf = num_subbuf;
- strlcpy(chanb->name, name, NAME_MAX);
-- chanb->config = config;
-+ memcpy(&chanb->config, config, sizeof(chanb->config));
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
-@@ -421,7 +433,7 @@ free_cpumask:
- */
- void channel_backend_unregister_notifiers(struct channel_backend *chanb)
- {
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-@@ -435,7 +447,7 @@ void channel_backend_unregister_notifier
- */
- void channel_backend_free(struct channel_backend *chanb)
- {
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- unsigned int i;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-@@ -469,7 +481,7 @@ void _lib_ring_buffer_write(struct lib_r
- const void *src, size_t len, ssize_t pagecpy)
- {
- struct channel_backend *chanb = &bufb->chan->backend;
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
- unsigned long sb_bindex, id;
-@@ -515,7 +527,7 @@ void _lib_ring_buffer_memset(struct lib_
- int c, size_t len, ssize_t pagecpy)
- {
- struct channel_backend *chanb = &bufb->chan->backend;
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
- unsigned long sb_bindex, id;
-@@ -564,7 +576,7 @@ void _lib_ring_buffer_copy_from_user(str
- ssize_t pagecpy)
- {
- struct channel_backend *chanb = &bufb->chan->backend;
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
- unsigned long sb_bindex, id;
-@@ -616,7 +628,7 @@ size_t lib_ring_buffer_read(struct lib_r
- void *dest, size_t len)
- {
- struct channel_backend *chanb = &bufb->chan->backend;
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- size_t index;
- ssize_t pagecpy, orig_len;
- struct lib_ring_buffer_backend_pages *rpages;
-@@ -668,7 +680,7 @@ int __lib_ring_buffer_copy_to_user(struc
- size_t offset, void __user *dest, size_t len)
- {
- struct channel_backend *chanb = &bufb->chan->backend;
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- size_t index;
- ssize_t pagecpy;
- struct lib_ring_buffer_backend_pages *rpages;
-@@ -719,7 +731,7 @@ int lib_ring_buffer_read_cstr(struct lib
- void *dest, size_t len)
- {
- struct channel_backend *chanb = &bufb->chan->backend;
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- size_t index;
- ssize_t pagecpy, pagelen, strpagelen, orig_offset;
- char *str;
-@@ -777,7 +789,7 @@ struct page **lib_ring_buffer_read_get_p
- size_t index;
- struct lib_ring_buffer_backend_pages *rpages;
- struct channel_backend *chanb = &bufb->chan->backend;
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- unsigned long sb_bindex, id;
-
- offset &= chanb->buf_size - 1;
-@@ -808,7 +820,7 @@ void *lib_ring_buffer_read_offset_addres
- size_t index;
- struct lib_ring_buffer_backend_pages *rpages;
- struct channel_backend *chanb = &bufb->chan->backend;
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- unsigned long sb_bindex, id;
-
- offset &= chanb->buf_size - 1;
-@@ -838,7 +850,7 @@ void *lib_ring_buffer_offset_address(str
- size_t sbidx, index;
- struct lib_ring_buffer_backend_pages *rpages;
- struct channel_backend *chanb = &bufb->chan->backend;
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- unsigned long sb_bindex, id;
-
- offset &= chanb->buf_size - 1;
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_frontend.c
-@@ -1,7 +1,22 @@
- /*
- * ring_buffer_frontend.c
- *
-- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
- *
- * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
- * recorder (overwrite) modes. See thesis:
-@@ -34,8 +49,6 @@
- * - splice one subbuffer worth of data to a pipe
- * - splice the data from pipe to disk/network
- * - put_subbuf
-- *
-- * Dual LGPL v2.1/GPL v2 license.
- */
-
- #include <linux/delay.h>
-@@ -103,7 +116,7 @@ void lib_ring_buffer_free(struct lib_rin
- void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
- {
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned int i;
-
- /*
-@@ -161,7 +174,7 @@ EXPORT_SYMBOL_GPL(channel_reset);
- int lib_ring_buffer_create(struct lib_ring_buffer *buf,
- struct channel_backend *chanb, int cpu)
- {
-- const struct lib_ring_buffer_config *config = chanb->config;
-+ const struct lib_ring_buffer_config *config = &chanb->config;
- struct channel *chan = container_of(chanb, struct channel, backend);
- void *priv = chanb->priv;
- size_t subbuf_header_size;
-@@ -253,7 +266,7 @@ static void switch_buffer_timer(unsigned
- {
- struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- /*
- * Only flush buffers periodically if readers are active.
-@@ -275,7 +288,7 @@ static void switch_buffer_timer(unsigned
- static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
- {
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- if (!chan->switch_timer_interval || buf->switch_timer_enabled)
- return;
-@@ -311,7 +324,7 @@ static void read_buffer_timer(unsigned l
- {
- struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- CHAN_WARN_ON(chan, !buf->backend.allocated);
-
-@@ -335,7 +348,7 @@ static void read_buffer_timer(unsigned l
- static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
- {
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
- || !chan->read_timer_interval
-@@ -360,7 +373,7 @@ static void lib_ring_buffer_start_read_t
- static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
- {
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
- || !chan->read_timer_interval
-@@ -397,7 +410,7 @@ int __cpuinit lib_ring_buffer_cpu_hp_cal
- struct channel *chan = container_of(nb, struct channel,
- cpu_hp_notifier);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- if (!chan->cpu_hp_enable)
- return NOTIFY_DONE;
-@@ -452,7 +465,7 @@ static int notrace ring_buffer_tick_nohz
- {
- struct channel *chan = container_of(nb, struct channel,
- tick_nohz_notifier);
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
- int cpu = smp_processor_id();
-
-@@ -524,7 +537,7 @@ void notrace lib_ring_buffer_tick_nohz_r
- */
- static void channel_unregister_notifiers(struct channel *chan)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- int cpu;
-
- channel_iterator_unregister_notifiers(chan);
-@@ -708,7 +721,7 @@ void channel_release(struct kref *kref)
- void *channel_destroy(struct channel *chan)
- {
- int cpu;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- void *priv;
-
- channel_unregister_notifiers(chan);
-@@ -818,7 +831,7 @@ int lib_ring_buffer_snapshot(struct lib_
- unsigned long *consumed, unsigned long *produced)
- {
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long consumed_cur, write_offset;
- int finalized;
-
-@@ -909,7 +922,7 @@ int lib_ring_buffer_get_subbuf(struct li
- unsigned long consumed)
- {
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
- int ret;
- int finalized;
-@@ -1055,7 +1068,7 @@ void lib_ring_buffer_put_subbuf(struct l
- {
- struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long read_sb_bindex, consumed_idx, consumed;
-
- CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
-@@ -1114,7 +1127,7 @@ void lib_ring_buffer_print_subbuffer_err
- unsigned long cons_offset,
- int cpu)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long cons_idx, commit_count, commit_count_sb;
-
- cons_idx = subbuf_index(cons_offset, chan);
-@@ -1140,7 +1153,7 @@ void lib_ring_buffer_print_buffer_errors
- struct channel *chan,
- void *priv, int cpu)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long write_offset, cons_offset;
-
- /*
-@@ -1170,27 +1183,34 @@ static
- void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- void *priv = chan->backend.priv;
-
-- printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
-- "%lu records overrun\n",
-- chan->backend.name, cpu,
-- v_read(config, &buf->records_count),
-- v_read(config, &buf->records_overrun));
--
-- if (v_read(config, &buf->records_lost_full)
-- || v_read(config, &buf->records_lost_wrap)
-- || v_read(config, &buf->records_lost_big))
-- printk(KERN_WARNING
-- "ring buffer %s, cpu %d: records were lost. Caused by:\n"
-- " [ %lu buffer full, %lu nest buffer wrap-around, "
-- "%lu event too big ]\n",
-- chan->backend.name, cpu,
-- v_read(config, &buf->records_lost_full),
-- v_read(config, &buf->records_lost_wrap),
-- v_read(config, &buf->records_lost_big));
--
-+ if (!strcmp(chan->backend.name, "relay-metadata")) {
-+ printk(KERN_DEBUG "ring buffer %s: %lu records written, "
-+ "%lu records overrun\n",
-+ chan->backend.name,
-+ v_read(config, &buf->records_count),
-+ v_read(config, &buf->records_overrun));
-+ } else {
-+ printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
-+ "%lu records overrun\n",
-+ chan->backend.name, cpu,
-+ v_read(config, &buf->records_count),
-+ v_read(config, &buf->records_overrun));
-+
-+ if (v_read(config, &buf->records_lost_full)
-+ || v_read(config, &buf->records_lost_wrap)
-+ || v_read(config, &buf->records_lost_big))
-+ printk(KERN_WARNING
-+ "ring buffer %s, cpu %d: records were lost. Caused by:\n"
-+ " [ %lu buffer full, %lu nest buffer wrap-around, "
-+ "%lu event too big ]\n",
-+ chan->backend.name, cpu,
-+ v_read(config, &buf->records_lost_full),
-+ v_read(config, &buf->records_lost_wrap),
-+ v_read(config, &buf->records_lost_big));
-+ }
- lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
- }
-
-@@ -1205,7 +1225,7 @@ void lib_ring_buffer_switch_old_start(st
- struct switch_offsets *offsets,
- u64 tsc)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long oldidx = subbuf_index(offsets->old, chan);
- unsigned long commit_count;
-
-@@ -1249,7 +1269,7 @@ void lib_ring_buffer_switch_old_end(stru
- struct switch_offsets *offsets,
- u64 tsc)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
- unsigned long commit_count, padding_size, data_size;
-
-@@ -1292,7 +1312,7 @@ void lib_ring_buffer_switch_new_start(st
- struct switch_offsets *offsets,
- u64 tsc)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long beginidx = subbuf_index(offsets->begin, chan);
- unsigned long commit_count;
-
-@@ -1334,7 +1354,7 @@ void lib_ring_buffer_switch_new_end(stru
- struct switch_offsets *offsets,
- u64 tsc)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long endidx = subbuf_index(offsets->end - 1, chan);
- unsigned long commit_count, padding_size, data_size;
-
-@@ -1376,7 +1396,7 @@ int lib_ring_buffer_try_switch_slow(enum
- struct switch_offsets *offsets,
- u64 *tsc)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long off;
-
- offsets->begin = v_read(config, &buf->offset);
-@@ -1435,7 +1455,7 @@ int lib_ring_buffer_try_switch_slow(enum
- void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
- {
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct switch_offsets offsets;
- unsigned long oldidx;
- u64 tsc;
-@@ -1496,7 +1516,7 @@ int lib_ring_buffer_try_reserve_slow(str
- struct switch_offsets *offsets,
- struct lib_ring_buffer_ctx *ctx)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long reserve_commit_diff;
-
- offsets->begin = v_read(config, &buf->offset);
-@@ -1631,7 +1651,7 @@ int lib_ring_buffer_try_reserve_slow(str
- int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
- {
- struct channel *chan = ctx->chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
- struct switch_offsets offsets;
- int ret;
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_iterator.c
-@@ -1,16 +1,28 @@
- /*
- * ring_buffer_iterator.c
- *
-- * (C) Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * Ring buffer and channel iterators. Get each event of a channel in order. Uses
- * a prio heap for per-cpu buffers, giving a O(log(NR_CPUS)) algorithmic
- * complexity for the "get next event" operation.
- *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * Dual LGPL v2.1/GPL v2 license.
- */
-
- #include "../../wrapper/ringbuffer/iterator.h"
-@@ -40,7 +52,7 @@
- ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
- struct lib_ring_buffer *buf)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer_iter *iter = &buf->iter;
- int ret;
-
-@@ -225,7 +237,7 @@ void lib_ring_buffer_wait_for_qs(const s
- ssize_t channel_get_next_record(struct channel *chan,
- struct lib_ring_buffer **ret_buf)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
- struct lttng_ptr_heap *heap;
- ssize_t len;
-@@ -333,7 +345,7 @@ void lib_ring_buffer_iterator_init(struc
- }
-
- /* Add to list of buffers without any current record */
-- if (chan->backend.config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-+ if (chan->backend.config.alloc == RING_BUFFER_ALLOC_PER_CPU)
- list_add(&buf->iter.empty_node, &chan->iter.empty_head);
- }
-
-@@ -347,7 +359,7 @@ int __cpuinit channel_iterator_cpu_hotpl
- struct channel *chan = container_of(nb, struct channel,
- hp_iter_notifier);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- if (!chan->hp_iter_enable)
- return NOTIFY_DONE;
-@@ -369,7 +381,7 @@ int __cpuinit channel_iterator_cpu_hotpl
-
- int channel_iterator_init(struct channel *chan)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-@@ -413,7 +425,7 @@ int channel_iterator_init(struct channel
-
- void channel_iterator_unregister_notifiers(struct channel *chan)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- chan->hp_iter_enable = 0;
-@@ -423,7 +435,7 @@ void channel_iterator_unregister_notifie
-
- void channel_iterator_free(struct channel *chan)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- lttng_heap_free(&chan->iter.heap);
-@@ -432,7 +444,7 @@ void channel_iterator_free(struct channe
- int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
- {
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
- return lib_ring_buffer_open_read(buf);
- }
-@@ -451,7 +463,7 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_iterat
-
- int channel_iterator_open(struct channel *chan)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
- int ret = 0, cpu;
-
-@@ -484,7 +496,7 @@ EXPORT_SYMBOL_GPL(channel_iterator_open)
-
- void channel_iterator_release(struct channel *chan)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
- int cpu;
-
-@@ -527,7 +539,7 @@ void lib_ring_buffer_iterator_reset(stru
-
- void channel_iterator_reset(struct channel *chan)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- struct lib_ring_buffer *buf;
- int cpu;
-
-@@ -558,7 +570,7 @@ ssize_t channel_ring_buffer_file_read(st
- struct lib_ring_buffer *buf,
- int fusionmerge)
- {
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- size_t read_count = 0, read_offset;
- ssize_t len;
-
-@@ -706,7 +718,7 @@ ssize_t channel_file_read(struct file *f
- {
- struct inode *inode = filp->f_dentry->d_inode;
- struct channel *chan = inode->i_private;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- return channel_ring_buffer_file_read(filp, user_buf, count,
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_mmap.c
-@@ -3,11 +3,23 @@
- *
- * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
- * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
-- * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Re-using content from kernel/relay.c.
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; only version 2 of the License.
- *
-- * This file is released under the GPL v2.
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along
-+ * with this program; if not, write to the Free Software Foundation, Inc.,
-+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-+ *
-+ * Re-using code from kernel/relay.c, hence the GPLv2 license for this
-+ * file.
- */
-
- #include <linux/module.h>
-@@ -24,12 +36,16 @@ static int lib_ring_buffer_fault(struct
- {
- struct lib_ring_buffer *buf = vma->vm_private_data;
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- pgoff_t pgoff = vmf->pgoff;
- struct page **page;
- void **virt;
- unsigned long offset, sb_bindex;
-
-+
-+ if (!buf)
-+ return VM_FAULT_OOM;
-+
- /*
- * Verify that faults are only done on the range of pages owned by the
- * reader.
-@@ -74,12 +90,15 @@ static int lib_ring_buffer_mmap_buf(stru
- {
- unsigned long length = vma->vm_end - vma->vm_start;
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long mmap_buf_len;
-
- if (config->output != RING_BUFFER_MMAP)
- return -EINVAL;
-
-+ if (!buf)
-+ return -EBADF;
-+
- mmap_buf_len = chan->backend.buf_size;
- if (chan->backend.extra_reader_sb)
- mmap_buf_len += chan->backend.subbuf_size;
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_splice.c
-@@ -3,11 +3,24 @@
- *
- * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
- * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
-- * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Re-using content from kernel/relay.c.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
- *
-- * This file is released under the GPL v2.
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
-+ * Re-using code from kernel/relay.c, which is why it is licensed under
-+ * the GPLv2.
- */
-
- #include <linux/module.h>
-@@ -69,7 +82,7 @@ static int subbuf_splice_actor(struct fi
- {
- struct lib_ring_buffer *buf = in->private_data;
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned int poff, subbuf_pages, nr_pages;
- struct page *pages[PIPE_DEF_BUFFERS];
- struct partial_page partial[PIPE_DEF_BUFFERS];
-@@ -151,7 +164,7 @@ ssize_t lib_ring_buffer_splice_read(stru
- {
- struct lib_ring_buffer *buf = in->private_data;
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- ssize_t spliced;
- int ret;
-
---- a/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
-+++ b/drivers/staging/lttng/lib/ringbuffer/ring_buffer_vfs.c
-@@ -1,11 +1,23 @@
- /*
- * ring_buffer_vfs.c
- *
-- * Copyright (C) 2009-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * Ring Buffer VFS file operations.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
-@@ -88,7 +100,7 @@ unsigned int lib_ring_buffer_poll(struct
- unsigned int mask = 0;
- struct lib_ring_buffer *buf = filp->private_data;
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
- int finalized, disabled;
-
- if (filp->f_mode & FMODE_READ) {
-@@ -165,7 +177,7 @@ long lib_ring_buffer_ioctl(struct file *
- {
- struct lib_ring_buffer *buf = filp->private_data;
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- if (lib_ring_buffer_channel_is_disabled(chan))
- return -EIO;
-@@ -262,7 +274,7 @@ long lib_ring_buffer_compat_ioctl(struct
- {
- struct lib_ring_buffer *buf = filp->private_data;
- struct channel *chan = buf->backend.chan;
-- const struct lib_ring_buffer_config *config = chan->backend.config;
-+ const struct lib_ring_buffer_config *config = &chan->backend.config;
-
- if (lib_ring_buffer_channel_is_disabled(chan))
- return -EIO;
---- a/drivers/staging/lttng/lib/ringbuffer/vatomic.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/vatomic.h
-@@ -1,12 +1,24 @@
--#ifndef _LINUX_RING_BUFFER_VATOMIC_H
--#define _LINUX_RING_BUFFER_VATOMIC_H
-+#ifndef _LIB_RING_BUFFER_VATOMIC_H
-+#define _LIB_RING_BUFFER_VATOMIC_H
-
- /*
-- * linux/ringbuffer/vatomic.h
-+ * lib/ringbuffer/vatomic.h
- *
-- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <asm/atomic.h>
-@@ -82,4 +94,4 @@ long v_cmpxchg(const struct lib_ring_buf
- return atomic_long_cmpxchg(&v_a->a, old, _new);
- }
-
--#endif /* _LINUX_RING_BUFFER_VATOMIC_H */
-+#endif /* _LIB_RING_BUFFER_VATOMIC_H */
---- a/drivers/staging/lttng/lib/ringbuffer/vfs.h
-+++ b/drivers/staging/lttng/lib/ringbuffer/vfs.h
-@@ -1,17 +1,29 @@
--#ifndef _LINUX_RING_BUFFER_VFS_H
--#define _LINUX_RING_BUFFER_VFS_H
-+#ifndef _LIB_RING_BUFFER_VFS_H
-+#define _LIB_RING_BUFFER_VFS_H
-
- /*
-- * linux/ringbuffer/vfs.h
-- *
-- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lib/ringbuffer/vfs.h
- *
- * Wait-free ring buffer VFS file operations.
- *
-+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * Dual LGPL v2.1/GPL v2 license.
- */
-
- #include <linux/fs.h>
-@@ -86,4 +98,4 @@ long lib_ring_buffer_compat_ioctl(struct
- /* flush the current sub-buffer */
- #define RING_BUFFER_FLUSH _IO(0xF6, 0x0C)
-
--#endif /* _LINUX_RING_BUFFER_VFS_H */
-+#endif /* _LIB_RING_BUFFER_VFS_H */
---- a/drivers/staging/lttng/ltt-context.c
-+++ /dev/null
-@@ -1,93 +0,0 @@
--/*
-- * ltt-context.c
-- *
-- * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng trace/channel/event context management.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include <linux/list.h>
--#include <linux/mutex.h>
--#include <linux/slab.h>
--#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
--#include "ltt-events.h"
--#include "ltt-tracer.h"
--
--int lttng_find_context(struct lttng_ctx *ctx, const char *name)
--{
-- unsigned int i;
--
-- for (i = 0; i < ctx->nr_fields; i++) {
-- /* Skip allocated (but non-initialized) contexts */
-- if (!ctx->fields[i].event_field.name)
-- continue;
-- if (!strcmp(ctx->fields[i].event_field.name, name))
-- return 1;
-- }
-- return 0;
--}
--EXPORT_SYMBOL_GPL(lttng_find_context);
--
--/*
-- * Note: as we append context information, the pointer location may change.
-- */
--struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
--{
-- struct lttng_ctx_field *field;
-- struct lttng_ctx *ctx;
--
-- if (!*ctx_p) {
-- *ctx_p = kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
-- if (!*ctx_p)
-- return NULL;
-- }
-- ctx = *ctx_p;
-- if (ctx->nr_fields + 1 > ctx->allocated_fields) {
-- struct lttng_ctx_field *new_fields;
--
-- ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
-- new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
-- if (!new_fields)
-- return NULL;
-- if (ctx->fields)
-- memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
-- kfree(ctx->fields);
-- ctx->fields = new_fields;
-- }
-- field = &ctx->fields[ctx->nr_fields];
-- ctx->nr_fields++;
-- return field;
--}
--EXPORT_SYMBOL_GPL(lttng_append_context);
--
--/*
-- * Remove last context field.
-- */
--void lttng_remove_context_field(struct lttng_ctx **ctx_p,
-- struct lttng_ctx_field *field)
--{
-- struct lttng_ctx *ctx;
--
-- ctx = *ctx_p;
-- ctx->nr_fields--;
-- WARN_ON_ONCE(&ctx->fields[ctx->nr_fields] != field);
-- memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
--}
--EXPORT_SYMBOL_GPL(lttng_remove_context_field);
--
--void lttng_destroy_context(struct lttng_ctx *ctx)
--{
-- int i;
--
-- if (!ctx)
-- return;
-- for (i = 0; i < ctx->nr_fields; i++) {
-- if (ctx->fields[i].destroy)
-- ctx->fields[i].destroy(&ctx->fields[i]);
-- }
-- kfree(ctx->fields);
-- kfree(ctx);
--}
---- a/drivers/staging/lttng/ltt-debugfs-abi.c
-+++ /dev/null
-@@ -1,777 +0,0 @@
--/*
-- * ltt-debugfs-abi.c
-- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng debugfs ABI
-- *
-- * Mimic system calls for:
-- * - session creation, returns a file descriptor or failure.
-- * - channel creation, returns a file descriptor or failure.
-- * - Operates on a session file descriptor
-- * - Takes all channel options as parameters.
-- * - stream get, returns a file descriptor or failure.
-- * - Operates on a channel file descriptor.
-- * - stream notifier get, returns a file descriptor or failure.
-- * - Operates on a channel file descriptor.
-- * - event creation, returns a file descriptor or failure.
-- * - Operates on a channel file descriptor
-- * - Takes an event name as parameter
-- * - Takes an instrumentation source as parameter
-- * - e.g. tracepoints, dynamic_probes...
-- * - Takes instrumentation source specific arguments.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include <linux/debugfs.h>
--#include <linux/proc_fs.h>
--#include <linux/anon_inodes.h>
--#include <linux/file.h>
--#include <linux/uaccess.h>
--#include <linux/slab.h>
--#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
--#include "wrapper/ringbuffer/vfs.h"
--#include "wrapper/poll.h"
--#include "ltt-debugfs-abi.h"
--#include "ltt-events.h"
--#include "ltt-tracer.h"
--
--/*
-- * This is LTTng's own personal way to create a system call as an external
-- * module. We use ioctl() on /sys/kernel/debug/lttng.
-- */
--
--static struct dentry *lttng_dentry;
--static struct proc_dir_entry *lttng_proc_dentry;
--static const struct file_operations lttng_fops;
--static const struct file_operations lttng_session_fops;
--static const struct file_operations lttng_channel_fops;
--static const struct file_operations lttng_metadata_fops;
--static const struct file_operations lttng_event_fops;
--
--/*
-- * Teardown management: opened file descriptors keep a refcount on the module,
-- * so it can only exit when all file descriptors are closed.
-- */
--
--enum channel_type {
-- PER_CPU_CHANNEL,
-- METADATA_CHANNEL,
--};
--
--static
--int lttng_abi_create_session(void)
--{
-- struct ltt_session *session;
-- struct file *session_file;
-- int session_fd, ret;
--
-- session = ltt_session_create();
-- if (!session)
-- return -ENOMEM;
-- session_fd = get_unused_fd();
-- if (session_fd < 0) {
-- ret = session_fd;
-- goto fd_error;
-- }
-- session_file = anon_inode_getfile("[lttng_session]",
-- &lttng_session_fops,
-- session, O_RDWR);
-- if (IS_ERR(session_file)) {
-- ret = PTR_ERR(session_file);
-- goto file_error;
-- }
-- session->file = session_file;
-- fd_install(session_fd, session_file);
-- return session_fd;
--
--file_error:
-- put_unused_fd(session_fd);
--fd_error:
-- ltt_session_destroy(session);
-- return ret;
--}
--
--static
--int lttng_abi_tracepoint_list(void)
--{
-- struct file *tracepoint_list_file;
-- int file_fd, ret;
--
-- file_fd = get_unused_fd();
-- if (file_fd < 0) {
-- ret = file_fd;
-- goto fd_error;
-- }
--
-- tracepoint_list_file = anon_inode_getfile("[lttng_session]",
-- &lttng_tracepoint_list_fops,
-- NULL, O_RDWR);
-- if (IS_ERR(tracepoint_list_file)) {
-- ret = PTR_ERR(tracepoint_list_file);
-- goto file_error;
-- }
-- ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
-- if (ret < 0)
-- goto open_error;
-- fd_install(file_fd, tracepoint_list_file);
-- if (file_fd < 0) {
-- ret = file_fd;
-- goto fd_error;
-- }
-- return file_fd;
--
--open_error:
-- fput(tracepoint_list_file);
--file_error:
-- put_unused_fd(file_fd);
--fd_error:
-- return ret;
--}
--
--static
--long lttng_abi_tracer_version(struct file *file,
-- struct lttng_kernel_tracer_version __user *uversion_param)
--{
-- struct lttng_kernel_tracer_version v;
--
-- v.version = LTTNG_VERSION;
-- v.patchlevel = LTTNG_PATCHLEVEL;
-- v.sublevel = LTTNG_SUBLEVEL;
--
-- if (copy_to_user(uversion_param, &v, sizeof(v)))
-- return -EFAULT;
-- return 0;
--}
--
--static
--long lttng_abi_add_context(struct file *file,
-- struct lttng_kernel_context __user *ucontext_param,
-- struct lttng_ctx **ctx, struct ltt_session *session)
--{
-- struct lttng_kernel_context context_param;
--
-- if (session->been_active)
-- return -EPERM;
--
-- if (copy_from_user(&context_param, ucontext_param, sizeof(context_param)))
-- return -EFAULT;
--
-- switch (context_param.ctx) {
-- case LTTNG_KERNEL_CONTEXT_PID:
-- return lttng_add_pid_to_ctx(ctx);
-- case LTTNG_KERNEL_CONTEXT_PRIO:
-- return lttng_add_prio_to_ctx(ctx);
-- case LTTNG_KERNEL_CONTEXT_NICE:
-- return lttng_add_nice_to_ctx(ctx);
-- case LTTNG_KERNEL_CONTEXT_VPID:
-- return lttng_add_vpid_to_ctx(ctx);
-- case LTTNG_KERNEL_CONTEXT_TID:
-- return lttng_add_tid_to_ctx(ctx);
-- case LTTNG_KERNEL_CONTEXT_VTID:
-- return lttng_add_vtid_to_ctx(ctx);
-- case LTTNG_KERNEL_CONTEXT_PPID:
-- return lttng_add_ppid_to_ctx(ctx);
-- case LTTNG_KERNEL_CONTEXT_VPPID:
-- return lttng_add_vppid_to_ctx(ctx);
-- case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
-- context_param.u.perf_counter.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-- return lttng_add_perf_counter_to_ctx(context_param.u.perf_counter.type,
-- context_param.u.perf_counter.config,
-- context_param.u.perf_counter.name,
-- ctx);
-- case LTTNG_KERNEL_CONTEXT_PROCNAME:
-- return lttng_add_procname_to_ctx(ctx);
-- default:
-- return -EINVAL;
-- }
--}
--
--/**
-- * lttng_ioctl - lttng syscall through ioctl
-- *
-- * @file: the file
-- * @cmd: the command
-- * @arg: command arg
-- *
-- * This ioctl implements lttng commands:
-- * LTTNG_KERNEL_SESSION
-- * Returns a LTTng trace session file descriptor
-- * LTTNG_KERNEL_TRACER_VERSION
-- * Returns the LTTng kernel tracer version
-- * LTTNG_KERNEL_TRACEPOINT_LIST
-- * Returns a file descriptor listing available tracepoints
-- * LTTNG_KERNEL_WAIT_QUIESCENT
-- * Returns after all previously running probes have completed
-- *
-- * The returned session will be deleted when its file descriptor is closed.
-- */
--static
--long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
--{
-- switch (cmd) {
-- case LTTNG_KERNEL_SESSION:
-- return lttng_abi_create_session();
-- case LTTNG_KERNEL_TRACER_VERSION:
-- return lttng_abi_tracer_version(file,
-- (struct lttng_kernel_tracer_version __user *) arg);
-- case LTTNG_KERNEL_TRACEPOINT_LIST:
-- return lttng_abi_tracepoint_list();
-- case LTTNG_KERNEL_WAIT_QUIESCENT:
-- synchronize_trace();
-- return 0;
-- case LTTNG_KERNEL_CALIBRATE:
-- {
-- struct lttng_kernel_calibrate __user *ucalibrate =
-- (struct lttng_kernel_calibrate __user *) arg;
-- struct lttng_kernel_calibrate calibrate;
-- int ret;
--
-- if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
-- return -EFAULT;
-- ret = lttng_calibrate(&calibrate);
-- if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
-- return -EFAULT;
-- return ret;
-- }
-- default:
-- return -ENOIOCTLCMD;
-- }
--}
--
--static const struct file_operations lttng_fops = {
-- .owner = THIS_MODULE,
-- .unlocked_ioctl = lttng_ioctl,
--#ifdef CONFIG_COMPAT
-- .compat_ioctl = lttng_ioctl,
--#endif
--};
--
--/*
-- * We tolerate no failure in this function (if one happens, we print a dmesg
-- * error, but we cannot return any error, because the channel information is
-- * invariant).
-- */
--static
--void lttng_metadata_create_events(struct file *channel_file)
--{
-- struct ltt_channel *channel = channel_file->private_data;
-- static struct lttng_kernel_event metadata_params = {
-- .instrumentation = LTTNG_KERNEL_TRACEPOINT,
-- .name = "lttng_metadata",
-- };
-- struct ltt_event *event;
--
-- /*
-- * We tolerate no failure path after event creation. It will stay
-- * invariant for the rest of the session.
-- */
-- event = ltt_event_create(channel, &metadata_params, NULL, NULL);
-- if (!event) {
-- goto create_error;
-- }
-- return;
--
--create_error:
-- WARN_ON(1);
-- return; /* not allowed to return error */
--}
--
--static
--int lttng_abi_create_channel(struct file *session_file,
-- struct lttng_kernel_channel __user *uchan_param,
-- enum channel_type channel_type)
--{
-- struct ltt_session *session = session_file->private_data;
-- const struct file_operations *fops = NULL;
-- const char *transport_name;
-- struct ltt_channel *chan;
-- struct file *chan_file;
-- struct lttng_kernel_channel chan_param;
-- int chan_fd;
-- int ret = 0;
--
-- if (copy_from_user(&chan_param, uchan_param, sizeof(chan_param)))
-- return -EFAULT;
-- chan_fd = get_unused_fd();
-- if (chan_fd < 0) {
-- ret = chan_fd;
-- goto fd_error;
-- }
-- switch (channel_type) {
-- case PER_CPU_CHANNEL:
-- fops = &lttng_channel_fops;
-- break;
-- case METADATA_CHANNEL:
-- fops = &lttng_metadata_fops;
-- break;
-- }
--
-- chan_file = anon_inode_getfile("[lttng_channel]",
-- fops,
-- NULL, O_RDWR);
-- if (IS_ERR(chan_file)) {
-- ret = PTR_ERR(chan_file);
-- goto file_error;
-- }
-- switch (channel_type) {
-- case PER_CPU_CHANNEL:
-- if (chan_param.output == LTTNG_KERNEL_SPLICE) {
-- transport_name = chan_param.overwrite ?
-- "relay-overwrite" : "relay-discard";
-- } else if (chan_param.output == LTTNG_KERNEL_MMAP) {
-- transport_name = chan_param.overwrite ?
-- "relay-overwrite-mmap" : "relay-discard-mmap";
-- } else {
-- return -EINVAL;
-- }
-- break;
-- case METADATA_CHANNEL:
-- if (chan_param.output == LTTNG_KERNEL_SPLICE)
-- transport_name = "relay-metadata";
-- else if (chan_param.output == LTTNG_KERNEL_MMAP)
-- transport_name = "relay-metadata-mmap";
-- else
-- return -EINVAL;
-- break;
-- default:
-- transport_name = "<unknown>";
-- break;
-- }
-- /*
-- * We tolerate no failure path after channel creation. It will stay
-- * invariant for the rest of the session.
-- */
-- chan = ltt_channel_create(session, transport_name, NULL,
-- chan_param.subbuf_size,
-- chan_param.num_subbuf,
-- chan_param.switch_timer_interval,
-- chan_param.read_timer_interval);
-- if (!chan) {
-- ret = -EINVAL;
-- goto chan_error;
-- }
-- chan->file = chan_file;
-- chan_file->private_data = chan;
-- fd_install(chan_fd, chan_file);
-- if (channel_type == METADATA_CHANNEL) {
-- session->metadata = chan;
-- lttng_metadata_create_events(chan_file);
-- }
--
-- /* The channel created holds a reference on the session */
-- atomic_long_inc(&session_file->f_count);
--
-- return chan_fd;
--
--chan_error:
-- fput(chan_file);
--file_error:
-- put_unused_fd(chan_fd);
--fd_error:
-- return ret;
--}
--
--/**
-- * lttng_session_ioctl - lttng session fd ioctl
-- *
-- * @file: the file
-- * @cmd: the command
-- * @arg: command arg
-- *
-- * This ioctl implements lttng commands:
-- * LTTNG_KERNEL_CHANNEL
-- * Returns a LTTng channel file descriptor
-- * LTTNG_KERNEL_ENABLE
-- * Enables tracing for a session (weak enable)
-- * LTTNG_KERNEL_DISABLE
-- * Disables tracing for a session (strong disable)
-- * LTTNG_KERNEL_METADATA
-- * Returns a LTTng metadata file descriptor
-- *
-- * The returned channel will be deleted when its file descriptor is closed.
-- */
--static
--long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
--{
-- struct ltt_session *session = file->private_data;
--
-- switch (cmd) {
-- case LTTNG_KERNEL_CHANNEL:
-- return lttng_abi_create_channel(file,
-- (struct lttng_kernel_channel __user *) arg,
-- PER_CPU_CHANNEL);
-- case LTTNG_KERNEL_SESSION_START:
-- case LTTNG_KERNEL_ENABLE:
-- return ltt_session_enable(session);
-- case LTTNG_KERNEL_SESSION_STOP:
-- case LTTNG_KERNEL_DISABLE:
-- return ltt_session_disable(session);
-- case LTTNG_KERNEL_METADATA:
-- return lttng_abi_create_channel(file,
-- (struct lttng_kernel_channel __user *) arg,
-- METADATA_CHANNEL);
-- default:
-- return -ENOIOCTLCMD;
-- }
--}
--
--/*
-- * Called when the last file reference is dropped.
-- *
-- * Big fat note: channels and events are invariant for the whole session after
-- * their creation. So this session destruction also destroys all channel and
-- * event structures specific to this session (they are not destroyed when their
-- * individual file is released).
-- */
--static
--int lttng_session_release(struct inode *inode, struct file *file)
--{
-- struct ltt_session *session = file->private_data;
--
-- if (session)
-- ltt_session_destroy(session);
-- return 0;
--}
--
--static const struct file_operations lttng_session_fops = {
-- .owner = THIS_MODULE,
-- .release = lttng_session_release,
-- .unlocked_ioctl = lttng_session_ioctl,
--#ifdef CONFIG_COMPAT
-- .compat_ioctl = lttng_session_ioctl,
--#endif
--};
--
--static
--int lttng_abi_open_stream(struct file *channel_file)
--{
-- struct ltt_channel *channel = channel_file->private_data;
-- struct lib_ring_buffer *buf;
-- int stream_fd, ret;
-- struct file *stream_file;
--
-- buf = channel->ops->buffer_read_open(channel->chan);
-- if (!buf)
-- return -ENOENT;
--
-- stream_fd = get_unused_fd();
-- if (stream_fd < 0) {
-- ret = stream_fd;
-- goto fd_error;
-- }
-- stream_file = anon_inode_getfile("[lttng_stream]",
-- &lib_ring_buffer_file_operations,
-- buf, O_RDWR);
-- if (IS_ERR(stream_file)) {
-- ret = PTR_ERR(stream_file);
-- goto file_error;
-- }
-- /*
-- * OPEN_FMODE, called within anon_inode_getfile/alloc_file, doesn't honor
-- * FMODE_LSEEK, FMODE_PREAD, or FMODE_PWRITE. We need to read from this
-- * file descriptor, so we set FMODE_PREAD here.
-- */
-- stream_file->f_mode |= FMODE_PREAD;
-- fd_install(stream_fd, stream_file);
-- /*
-- * The stream holds a reference to the channel within the generic ring
-- * buffer library, so no need to hold a refcount on the channel and
-- * session files here.
-- */
-- return stream_fd;
--
--file_error:
-- put_unused_fd(stream_fd);
--fd_error:
-- channel->ops->buffer_read_close(buf);
-- return ret;
--}
--
--static
--int lttng_abi_create_event(struct file *channel_file,
-- struct lttng_kernel_event __user *uevent_param)
--{
-- struct ltt_channel *channel = channel_file->private_data;
-- struct ltt_event *event;
-- struct lttng_kernel_event event_param;
-- int event_fd, ret;
-- struct file *event_file;
--
-- if (copy_from_user(&event_param, uevent_param, sizeof(event_param)))
-- return -EFAULT;
-- event_param.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-- switch (event_param.instrumentation) {
-- case LTTNG_KERNEL_KRETPROBE:
-- event_param.u.kretprobe.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-- break;
-- case LTTNG_KERNEL_KPROBE:
-- event_param.u.kprobe.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-- break;
-- case LTTNG_KERNEL_FUNCTION:
-- event_param.u.ftrace.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-- break;
-- default:
-- break;
-- }
-- switch (event_param.instrumentation) {
-- default:
-- event_fd = get_unused_fd();
-- if (event_fd < 0) {
-- ret = event_fd;
-- goto fd_error;
-- }
-- event_file = anon_inode_getfile("[lttng_event]",
-- &lttng_event_fops,
-- NULL, O_RDWR);
-- if (IS_ERR(event_file)) {
-- ret = PTR_ERR(event_file);
-- goto file_error;
-- }
-- /*
-- * We tolerate no failure path after event creation. It
-- * will stay invariant for the rest of the session.
-- */
-- event = ltt_event_create(channel, &event_param, NULL, NULL);
-- if (!event) {
-- ret = -EINVAL;
-- goto event_error;
-- }
-- event_file->private_data = event;
-- fd_install(event_fd, event_file);
-- /* The event holds a reference on the channel */
-- atomic_long_inc(&channel_file->f_count);
-- break;
-- case LTTNG_KERNEL_SYSCALL:
-- /*
-- * Only all-syscall tracing supported for now.
-- */
-- if (event_param.name[0] != '\0')
-- return -EINVAL;
-- ret = lttng_syscalls_register(channel, NULL);
-- if (ret)
-- goto fd_error;
-- event_fd = 0;
-- break;
-- }
-- return event_fd;
--
--event_error:
-- fput(event_file);
--file_error:
-- put_unused_fd(event_fd);
--fd_error:
-- return ret;
--}
--
--/**
-- * lttng_channel_ioctl - lttng syscall through ioctl
-- *
-- * @file: the file
-- * @cmd: the command
-- * @arg: command arg
-- *
-- * This ioctl implements lttng commands:
-- * LTTNG_KERNEL_STREAM
-- * Returns an event stream file descriptor or failure.
-- * (typically, one event stream records events from one CPU)
-- * LTTNG_KERNEL_EVENT
-- * Returns an event file descriptor or failure.
-- * LTTNG_KERNEL_CONTEXT
-- * Prepend a context field to each event in the channel
-- * LTTNG_KERNEL_ENABLE
-- * Enable recording for events in this channel (weak enable)
-- * LTTNG_KERNEL_DISABLE
-- * Disable recording for events in this channel (strong disable)
-- *
-- * Channel and event file descriptors also hold a reference on the session.
-- */
--static
--long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
--{
-- struct ltt_channel *channel = file->private_data;
--
-- switch (cmd) {
-- case LTTNG_KERNEL_STREAM:
-- return lttng_abi_open_stream(file);
-- case LTTNG_KERNEL_EVENT:
-- return lttng_abi_create_event(file, (struct lttng_kernel_event __user *) arg);
-- case LTTNG_KERNEL_CONTEXT:
-- return lttng_abi_add_context(file,
-- (struct lttng_kernel_context __user *) arg,
-- &channel->ctx, channel->session);
-- case LTTNG_KERNEL_ENABLE:
-- return ltt_channel_enable(channel);
-- case LTTNG_KERNEL_DISABLE:
-- return ltt_channel_disable(channel);
-- default:
-- return -ENOIOCTLCMD;
-- }
--}
--
--/**
-- * lttng_metadata_ioctl - lttng syscall through ioctl
-- *
-- * @file: the file
-- * @cmd: the command
-- * @arg: command arg
-- *
-- * This ioctl implements lttng commands:
-- * LTTNG_KERNEL_STREAM
-- * Returns an event stream file descriptor or failure.
-- *
-- * Channel and event file descriptors also hold a reference on the session.
-- */
--static
--long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
--{
-- switch (cmd) {
-- case LTTNG_KERNEL_STREAM:
-- return lttng_abi_open_stream(file);
-- default:
-- return -ENOIOCTLCMD;
-- }
--}
--
--/**
-- * lttng_channel_poll - lttng stream addition/removal monitoring
-- *
-- * @file: the file
-- * @wait: poll table
-- */
--unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
--{
-- struct ltt_channel *channel = file->private_data;
-- unsigned int mask = 0;
--
-- if (file->f_mode & FMODE_READ) {
-- poll_wait_set_exclusive(wait);
-- poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
-- wait);
--
-- if (channel->ops->is_disabled(channel->chan))
-- return POLLERR;
-- if (channel->ops->is_finalized(channel->chan))
-- return POLLHUP;
-- if (channel->ops->buffer_has_read_closed_stream(channel->chan))
-- return POLLIN | POLLRDNORM;
-- return 0;
-- }
-- return mask;
--
--}
--
--static
--int lttng_channel_release(struct inode *inode, struct file *file)
--{
-- struct ltt_channel *channel = file->private_data;
--
-- if (channel)
-- fput(channel->session->file);
-- return 0;
--}
--
--static const struct file_operations lttng_channel_fops = {
-- .owner = THIS_MODULE,
-- .release = lttng_channel_release,
-- .poll = lttng_channel_poll,
-- .unlocked_ioctl = lttng_channel_ioctl,
--#ifdef CONFIG_COMPAT
-- .compat_ioctl = lttng_channel_ioctl,
--#endif
--};
--
--static const struct file_operations lttng_metadata_fops = {
-- .owner = THIS_MODULE,
-- .release = lttng_channel_release,
-- .unlocked_ioctl = lttng_metadata_ioctl,
--#ifdef CONFIG_COMPAT
-- .compat_ioctl = lttng_metadata_ioctl,
--#endif
--};
--
--/**
-- * lttng_event_ioctl - lttng syscall through ioctl
-- *
-- * @file: the file
-- * @cmd: the command
-- * @arg: command arg
-- *
-- * This ioctl implements lttng commands:
-- * LTTNG_KERNEL_CONTEXT
-- * Prepend a context field to each record of this event
-- * LTTNG_KERNEL_ENABLE
-- * Enable recording for this event (weak enable)
-- * LTTNG_KERNEL_DISABLE
-- * Disable recording for this event (strong disable)
-- */
--static
--long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
--{
-- struct ltt_event *event = file->private_data;
--
-- switch (cmd) {
-- case LTTNG_KERNEL_CONTEXT:
-- return lttng_abi_add_context(file,
-- (struct lttng_kernel_context __user *) arg,
-- &event->ctx, event->chan->session);
-- case LTTNG_KERNEL_ENABLE:
-- return ltt_event_enable(event);
-- case LTTNG_KERNEL_DISABLE:
-- return ltt_event_disable(event);
-- default:
-- return -ENOIOCTLCMD;
-- }
--}
--
--static
--int lttng_event_release(struct inode *inode, struct file *file)
--{
-- struct ltt_event *event = file->private_data;
--
-- if (event)
-- fput(event->chan->file);
-- return 0;
--}
--
--/* TODO: filter control ioctl */
--static const struct file_operations lttng_event_fops = {
-- .owner = THIS_MODULE,
-- .release = lttng_event_release,
-- .unlocked_ioctl = lttng_event_ioctl,
--#ifdef CONFIG_COMPAT
-- .compat_ioctl = lttng_event_ioctl,
--#endif
--};
--
--int __init ltt_debugfs_abi_init(void)
--{
-- int ret = 0;
--
-- wrapper_vmalloc_sync_all();
-- lttng_dentry = debugfs_create_file("lttng", S_IWUSR, NULL, NULL,
-- &lttng_fops);
-- if (IS_ERR(lttng_dentry))
-- lttng_dentry = NULL;
--
-- lttng_proc_dentry = proc_create_data("lttng", S_IWUSR, NULL,
-- &lttng_fops, NULL);
--
-- if (!lttng_dentry && !lttng_proc_dentry) {
-- printk(KERN_ERR "Error creating LTTng control file\n");
-- ret = -ENOMEM;
-- goto error;
-- }
--error:
-- return ret;
--}
--
--void __exit ltt_debugfs_abi_exit(void)
--{
-- if (lttng_dentry)
-- debugfs_remove(lttng_dentry);
-- if (lttng_proc_dentry)
-- remove_proc_entry("lttng", NULL);
--}
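Every descriptor handed out by the ABI code above (session, channel, stream, event, tracepoint list) is built with the same reserve/create/publish sequence, and the error labels unwind in the reverse order of the allocations. The condensed sketch below distills that pattern; lttng_abi_create_example and lttng_example_fops are invented names for illustration, while the calls themselves mirror the ones used above.

static int lttng_abi_create_example(void *priv)
{
	struct file *example_file;
	int example_fd, ret;

	example_fd = get_unused_fd();		/* reserve a descriptor */
	if (example_fd < 0)
		return example_fd;
	example_file = anon_inode_getfile("[lttng_example]",
					  &lttng_example_fops,	/* hypothetical fops */
					  priv, O_RDWR);
	if (IS_ERR(example_file)) {
		ret = PTR_ERR(example_file);
		goto file_error;
	}
	/* Publish the descriptor only once the file is fully initialized. */
	fd_install(example_fd, example_file);
	return example_fd;

file_error:
	put_unused_fd(example_fd);
	return ret;
}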
---- a/drivers/staging/lttng/ltt-debugfs-abi.h
-+++ /dev/null
-@@ -1,153 +0,0 @@
--#ifndef _LTT_DEBUGFS_ABI_H
--#define _LTT_DEBUGFS_ABI_H
--
--/*
-- * ltt-debugfs-abi.h
-- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng debugfs ABI header
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/fs.h>
--
--#define LTTNG_SYM_NAME_LEN 256
--
--enum lttng_kernel_instrumentation {
-- LTTNG_KERNEL_TRACEPOINT = 0,
-- LTTNG_KERNEL_KPROBE = 1,
-- LTTNG_KERNEL_FUNCTION = 2,
-- LTTNG_KERNEL_KRETPROBE = 3,
-- LTTNG_KERNEL_NOOP = 4, /* not hooked */
-- LTTNG_KERNEL_SYSCALL = 5,
--};
--
--/*
-- * LTTng consumer mode
-- */
--enum lttng_kernel_output {
-- LTTNG_KERNEL_SPLICE = 0,
-- LTTNG_KERNEL_MMAP = 1,
--};
--
--/*
-- * LTTng DebugFS ABI structures.
-- */
--
--struct lttng_kernel_channel {
-- int overwrite; /* 1: overwrite, 0: discard */
-- uint64_t subbuf_size; /* in bytes */
-- uint64_t num_subbuf;
-- unsigned int switch_timer_interval; /* usecs */
-- unsigned int read_timer_interval; /* usecs */
-- enum lttng_kernel_output output; /* splice, mmap */
--};
--
--struct lttng_kernel_kretprobe {
-- uint64_t addr;
--
-- uint64_t offset;
-- char symbol_name[LTTNG_SYM_NAME_LEN];
--};
--
--/*
-- * Either addr is used, or symbol_name and offset.
-- */
--struct lttng_kernel_kprobe {
-- uint64_t addr;
--
-- uint64_t offset;
-- char symbol_name[LTTNG_SYM_NAME_LEN];
--};
--
--struct lttng_kernel_function_tracer {
-- char symbol_name[LTTNG_SYM_NAME_LEN];
--};
--
--/*
-- * For syscall tracing, name = '\0' means "enable all".
-- */
--struct lttng_kernel_event {
-- char name[LTTNG_SYM_NAME_LEN]; /* event name */
-- enum lttng_kernel_instrumentation instrumentation;
-- /* Per instrumentation type configuration */
-- union {
-- struct lttng_kernel_kretprobe kretprobe;
-- struct lttng_kernel_kprobe kprobe;
-- struct lttng_kernel_function_tracer ftrace;
-- } u;
--};
--
--struct lttng_kernel_tracer_version {
-- uint32_t version;
-- uint32_t patchlevel;
-- uint32_t sublevel;
--};
--
--enum lttng_kernel_calibrate_type {
-- LTTNG_KERNEL_CALIBRATE_KRETPROBE,
--};
--
--struct lttng_kernel_calibrate {
-- enum lttng_kernel_calibrate_type type; /* type (input) */
--};
--
--enum lttng_kernel_context_type {
-- LTTNG_KERNEL_CONTEXT_PID = 0,
-- LTTNG_KERNEL_CONTEXT_PERF_COUNTER = 1,
-- LTTNG_KERNEL_CONTEXT_PROCNAME = 2,
-- LTTNG_KERNEL_CONTEXT_PRIO = 3,
-- LTTNG_KERNEL_CONTEXT_NICE = 4,
-- LTTNG_KERNEL_CONTEXT_VPID = 5,
-- LTTNG_KERNEL_CONTEXT_TID = 6,
-- LTTNG_KERNEL_CONTEXT_VTID = 7,
-- LTTNG_KERNEL_CONTEXT_PPID = 8,
-- LTTNG_KERNEL_CONTEXT_VPPID = 9,
--};
--
--struct lttng_kernel_perf_counter_ctx {
-- uint32_t type;
-- uint64_t config;
-- char name[LTTNG_SYM_NAME_LEN];
--};
--
--struct lttng_kernel_context {
-- enum lttng_kernel_context_type ctx;
-- union {
-- struct lttng_kernel_perf_counter_ctx perf_counter;
-- } u;
--};
--
--/* LTTng file descriptor ioctl */
--#define LTTNG_KERNEL_SESSION _IO(0xF6, 0x40)
--#define LTTNG_KERNEL_TRACER_VERSION \
-- _IOR(0xF6, 0x41, struct lttng_kernel_tracer_version)
--#define LTTNG_KERNEL_TRACEPOINT_LIST _IO(0xF6, 0x42)
--#define LTTNG_KERNEL_WAIT_QUIESCENT _IO(0xF6, 0x43)
--#define LTTNG_KERNEL_CALIBRATE \
-- _IOWR(0xF6, 0x44, struct lttng_kernel_calibrate)
--
--/* Session FD ioctl */
--#define LTTNG_KERNEL_METADATA \
-- _IOW(0xF6, 0x50, struct lttng_kernel_channel)
--#define LTTNG_KERNEL_CHANNEL \
-- _IOW(0xF6, 0x51, struct lttng_kernel_channel)
--#define LTTNG_KERNEL_SESSION_START _IO(0xF6, 0x52)
--#define LTTNG_KERNEL_SESSION_STOP _IO(0xF6, 0x53)
--
--/* Channel FD ioctl */
--#define LTTNG_KERNEL_STREAM _IO(0xF6, 0x60)
--#define LTTNG_KERNEL_EVENT \
-- _IOW(0xF6, 0x61, struct lttng_kernel_event)
--
--/* Event and Channel FD ioctl */
--#define LTTNG_KERNEL_CONTEXT \
-- _IOW(0xF6, 0x70, struct lttng_kernel_context)
--
--/* Event, Channel and Session ioctl */
--#define LTTNG_KERNEL_ENABLE _IO(0xF6, 0x80)
--#define LTTNG_KERNEL_DISABLE _IO(0xF6, 0x81)
--
--#endif /* _LTT_DEBUGFS_ABI_H */
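The ioctl layering this header defines is strictly hierarchical: the control file yields a session fd, the session fd yields channel fds, and a channel fd yields event and stream fds. Below is a minimal userspace sketch of that flow, assuming the header is usable from userspace and that the control file is /proc/lttng (as created by ltt_debugfs_abi_init above); the open mode, channel geometry, and the sched_switch tracepoint name are placeholders, and error handling is omitted.

#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include "ltt-debugfs-abi.h"	/* LTTNG_KERNEL_* requests and ABI structs */

int lttng_trace_one_tracepoint(void)
{
	struct lttng_kernel_channel chan_param = {
		.overwrite = 0,			/* discard mode */
		.subbuf_size = 262144,		/* bytes */
		.num_subbuf = 4,
		.switch_timer_interval = 0,	/* usecs */
		.read_timer_interval = 200000,	/* usecs */
		.output = LTTNG_KERNEL_SPLICE,
	};
	struct lttng_kernel_event ev_param;
	int root_fd, session_fd, channel_fd, event_fd, stream_fd;

	root_fd = open("/proc/lttng", O_RDWR);		/* control file */
	session_fd = ioctl(root_fd, LTTNG_KERNEL_SESSION);
	channel_fd = ioctl(session_fd, LTTNG_KERNEL_CHANNEL, &chan_param);

	memset(&ev_param, 0, sizeof(ev_param));
	ev_param.instrumentation = LTTNG_KERNEL_TRACEPOINT;
	strncpy(ev_param.name, "sched_switch", LTTNG_SYM_NAME_LEN - 1);
	event_fd = ioctl(channel_fd, LTTNG_KERNEL_EVENT, &ev_param);

	/* Typically one stream per per-CPU buffer; a consumer keeps opening
	 * streams until the ioctl fails, then reads or splices from them. */
	stream_fd = ioctl(channel_fd, LTTNG_KERNEL_STREAM);
	(void) event_fd;
	(void) stream_fd;

	return ioctl(session_fd, LTTNG_KERNEL_SESSION_START);
}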
---- a/drivers/staging/lttng/ltt-endian.h
-+++ /dev/null
-@@ -1,31 +0,0 @@
--#ifndef _LTT_ENDIAN_H
--#define _LTT_ENDIAN_H
--
--/*
-- * ltt-endian.h
-- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#ifdef __KERNEL__
--# include <asm/byteorder.h>
--# ifdef __BIG_ENDIAN
--# define __BYTE_ORDER __BIG_ENDIAN
--# elif defined(__LITTLE_ENDIAN)
--# define __BYTE_ORDER __LITTLE_ENDIAN
--# else
--# error "unknown endianness"
--# endif
--#ifndef __BIG_ENDIAN
--# define __BIG_ENDIAN 4321
--#endif
--#ifndef __LITTLE_ENDIAN
--# define __LITTLE_ENDIAN 1234
--#endif
--#else
--# include <endian.h>
--#endif
--
--#endif /* _LTT_ENDIAN_H */
---- a/drivers/staging/lttng/ltt-events.c
-+++ /dev/null
-@@ -1,1009 +0,0 @@
--/*
-- * ltt-events.c
-- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * Holds LTTng per-session event registry.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include <linux/list.h>
--#include <linux/mutex.h>
--#include <linux/sched.h>
--#include <linux/slab.h>
--#include <linux/jiffies.h>
--#include "wrapper/uuid.h"
--#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
--#include "ltt-events.h"
--#include "ltt-tracer.h"
--
--static LIST_HEAD(sessions);
--static LIST_HEAD(ltt_transport_list);
--static DEFINE_MUTEX(sessions_mutex);
--static struct kmem_cache *event_cache;
--
--static void _ltt_event_destroy(struct ltt_event *event);
--static void _ltt_channel_destroy(struct ltt_channel *chan);
--static int _ltt_event_unregister(struct ltt_event *event);
--static
--int _ltt_event_metadata_statedump(struct ltt_session *session,
-- struct ltt_channel *chan,
-- struct ltt_event *event);
--static
--int _ltt_session_metadata_statedump(struct ltt_session *session);
--
--void synchronize_trace(void)
--{
-- synchronize_sched();
--#ifdef CONFIG_PREEMPT_RT
-- synchronize_rcu();
--#endif
--}
--
--struct ltt_session *ltt_session_create(void)
--{
-- struct ltt_session *session;
--
-- mutex_lock(&sessions_mutex);
-- session = kzalloc(sizeof(struct ltt_session), GFP_KERNEL);
-- if (!session)
-- return NULL;
-- INIT_LIST_HEAD(&session->chan);
-- INIT_LIST_HEAD(&session->events);
-- uuid_le_gen(&session->uuid);
-- list_add(&session->list, &sessions);
-- mutex_unlock(&sessions_mutex);
-- return session;
--}
--
--void ltt_session_destroy(struct ltt_session *session)
--{
-- struct ltt_channel *chan, *tmpchan;
-- struct ltt_event *event, *tmpevent;
-- int ret;
--
-- mutex_lock(&sessions_mutex);
-- ACCESS_ONCE(session->active) = 0;
-- list_for_each_entry(chan, &session->chan, list) {
-- ret = lttng_syscalls_unregister(chan);
-- WARN_ON(ret);
-- }
-- list_for_each_entry(event, &session->events, list) {
-- ret = _ltt_event_unregister(event);
-- WARN_ON(ret);
-- }
-- synchronize_trace(); /* Wait for in-flight events to complete */
-- list_for_each_entry_safe(event, tmpevent, &session->events, list)
-- _ltt_event_destroy(event);
-- list_for_each_entry_safe(chan, tmpchan, &session->chan, list)
-- _ltt_channel_destroy(chan);
-- list_del(&session->list);
-- mutex_unlock(&sessions_mutex);
-- kfree(session);
--}
--
--int ltt_session_enable(struct ltt_session *session)
--{
-- int ret = 0;
-- struct ltt_channel *chan;
--
-- mutex_lock(&sessions_mutex);
-- if (session->active) {
-- ret = -EBUSY;
-- goto end;
-- }
--
-- /*
-- * Snapshot the number of events per channel to know the type of header
-- * we need to use.
-- */
-- list_for_each_entry(chan, &session->chan, list) {
-- if (chan->header_type)
-- continue; /* don't change it if session stop/restart */
-- if (chan->free_event_id < 31)
-- chan->header_type = 1; /* compact */
-- else
-- chan->header_type = 2; /* large */
-- }
--
-- ACCESS_ONCE(session->active) = 1;
-- ACCESS_ONCE(session->been_active) = 1;
-- ret = _ltt_session_metadata_statedump(session);
-- if (ret)
-- ACCESS_ONCE(session->active) = 0;
--end:
-- mutex_unlock(&sessions_mutex);
-- return ret;
--}
--
--int ltt_session_disable(struct ltt_session *session)
--{
-- int ret = 0;
--
-- mutex_lock(&sessions_mutex);
-- if (!session->active) {
-- ret = -EBUSY;
-- goto end;
-- }
-- ACCESS_ONCE(session->active) = 0;
--end:
-- mutex_unlock(&sessions_mutex);
-- return ret;
--}
--
--int ltt_channel_enable(struct ltt_channel *channel)
--{
-- int old;
--
-- if (channel == channel->session->metadata)
-- return -EPERM;
-- old = xchg(&channel->enabled, 1);
-- if (old)
-- return -EEXIST;
-- return 0;
--}
--
--int ltt_channel_disable(struct ltt_channel *channel)
--{
-- int old;
--
-- if (channel == channel->session->metadata)
-- return -EPERM;
-- old = xchg(&channel->enabled, 0);
-- if (!old)
-- return -EEXIST;
-- return 0;
--}
--
--int ltt_event_enable(struct ltt_event *event)
--{
-- int old;
--
-- if (event->chan == event->chan->session->metadata)
-- return -EPERM;
-- old = xchg(&event->enabled, 1);
-- if (old)
-- return -EEXIST;
-- return 0;
--}
--
--int ltt_event_disable(struct ltt_event *event)
--{
-- int old;
--
-- if (event->chan == event->chan->session->metadata)
-- return -EPERM;
-- old = xchg(&event->enabled, 0);
-- if (!old)
-- return -EEXIST;
-- return 0;
--}
--
--static struct ltt_transport *ltt_transport_find(const char *name)
--{
-- struct ltt_transport *transport;
--
-- list_for_each_entry(transport, &ltt_transport_list, node) {
-- if (!strcmp(transport->name, name))
-- return transport;
-- }
-- return NULL;
--}
--
--struct ltt_channel *ltt_channel_create(struct ltt_session *session,
-- const char *transport_name,
-- void *buf_addr,
-- size_t subbuf_size, size_t num_subbuf,
-- unsigned int switch_timer_interval,
-- unsigned int read_timer_interval)
--{
-- struct ltt_channel *chan;
-- struct ltt_transport *transport = NULL;
--
-- mutex_lock(&sessions_mutex);
-- if (session->been_active)
-- goto active; /* Refuse to add channel to active session */
-- transport = ltt_transport_find(transport_name);
-- if (!transport) {
-- printk(KERN_WARNING "LTTng transport %s not found\n",
-- transport_name);
-- goto notransport;
-- }
-- if (!try_module_get(transport->owner)) {
-- printk(KERN_WARNING "LTT : Can't lock transport module.\n");
-- goto notransport;
-- }
-- chan = kzalloc(sizeof(struct ltt_channel), GFP_KERNEL);
-- if (!chan)
-- goto nomem;
-- chan->session = session;
-- chan->id = session->free_chan_id++;
-- /*
-- * Note: the channel creation op already writes into the packet
-- * headers. Therefore the "chan" information used as input
-- * should already be accessible.
-- */
-- chan->chan = transport->ops.channel_create("[lttng]", chan, buf_addr,
-- subbuf_size, num_subbuf, switch_timer_interval,
-- read_timer_interval);
-- if (!chan->chan)
-- goto create_error;
-- chan->enabled = 1;
-- chan->ops = &transport->ops;
-- chan->transport = transport;
-- list_add(&chan->list, &session->chan);
-- mutex_unlock(&sessions_mutex);
-- return chan;
--
--create_error:
-- kfree(chan);
--nomem:
-- if (transport)
-- module_put(transport->owner);
--notransport:
--active:
-- mutex_unlock(&sessions_mutex);
-- return NULL;
--}
--
--/*
-- * Only used internally at session destruction.
-- */
--static
--void _ltt_channel_destroy(struct ltt_channel *chan)
--{
-- chan->ops->channel_destroy(chan->chan);
-- module_put(chan->transport->owner);
-- list_del(&chan->list);
-- lttng_destroy_context(chan->ctx);
-- kfree(chan);
--}
--
--/*
-- * Supports event creation while tracing session is active.
-- */
--struct ltt_event *ltt_event_create(struct ltt_channel *chan,
-- struct lttng_kernel_event *event_param,
-- void *filter,
-- const struct lttng_event_desc *internal_desc)
--{
-- struct ltt_event *event;
-- int ret;
--
-- mutex_lock(&sessions_mutex);
-- if (chan->free_event_id == -1UL)
-- goto full;
-- /*
-- * This is O(n^2) (for each event, the loop is called at event
-- * creation). Might require a hash if we have lots of events.
-- */
-- list_for_each_entry(event, &chan->session->events, list)
-- if (!strcmp(event->desc->name, event_param->name))
-- goto exist;
-- event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
-- if (!event)
-- goto cache_error;
-- event->chan = chan;
-- event->filter = filter;
-- event->id = chan->free_event_id++;
-- event->enabled = 1;
-- event->instrumentation = event_param->instrumentation;
-- /* Populate ltt_event structure before tracepoint registration. */
-- smp_wmb();
-- switch (event_param->instrumentation) {
-- case LTTNG_KERNEL_TRACEPOINT:
-- event->desc = ltt_event_get(event_param->name);
-- if (!event->desc)
-- goto register_error;
-- ret = tracepoint_probe_register(event_param->name,
-- event->desc->probe_callback,
-- event);
-- if (ret)
-- goto register_error;
-- break;
-- case LTTNG_KERNEL_KPROBE:
-- ret = lttng_kprobes_register(event_param->name,
-- event_param->u.kprobe.symbol_name,
-- event_param->u.kprobe.offset,
-- event_param->u.kprobe.addr,
-- event);
-- if (ret)
-- goto register_error;
-- ret = try_module_get(event->desc->owner);
-- WARN_ON_ONCE(!ret);
-- break;
-- case LTTNG_KERNEL_KRETPROBE:
-- {
-- struct ltt_event *event_return;
--
-- /* kretprobe defines 2 events */
-- event_return =
-- kmem_cache_zalloc(event_cache, GFP_KERNEL);
-- if (!event_return)
-- goto register_error;
-- event_return->chan = chan;
-- event_return->filter = filter;
-- event_return->id = chan->free_event_id++;
-- event_return->enabled = 1;
-- event_return->instrumentation = event_param->instrumentation;
-- /*
-- * Populate ltt_event structure before kretprobe registration.
-- */
-- smp_wmb();
-- ret = lttng_kretprobes_register(event_param->name,
-- event_param->u.kretprobe.symbol_name,
-- event_param->u.kretprobe.offset,
-- event_param->u.kretprobe.addr,
-- event, event_return);
-- if (ret) {
-- kmem_cache_free(event_cache, event_return);
-- goto register_error;
-- }
-- /* Take 2 refs on the module: one per event. */
-- ret = try_module_get(event->desc->owner);
-- WARN_ON_ONCE(!ret);
-- ret = try_module_get(event->desc->owner);
-- WARN_ON_ONCE(!ret);
-- ret = _ltt_event_metadata_statedump(chan->session, chan,
-- event_return);
-- if (ret) {
-- kmem_cache_free(event_cache, event_return);
-- module_put(event->desc->owner);
-- module_put(event->desc->owner);
-- goto statedump_error;
-- }
-- list_add(&event_return->list, &chan->session->events);
-- break;
-- }
-- case LTTNG_KERNEL_FUNCTION:
-- ret = lttng_ftrace_register(event_param->name,
-- event_param->u.ftrace.symbol_name,
-- event);
-- if (ret)
-- goto register_error;
-- ret = try_module_get(event->desc->owner);
-- WARN_ON_ONCE(!ret);
-- break;
-- case LTTNG_KERNEL_NOOP:
-- event->desc = internal_desc;
-- if (!event->desc)
-- goto register_error;
-- break;
-- default:
-- WARN_ON_ONCE(1);
-- }
-- ret = _ltt_event_metadata_statedump(chan->session, chan, event);
-- if (ret)
-- goto statedump_error;
-- list_add(&event->list, &chan->session->events);
-- mutex_unlock(&sessions_mutex);
-- return event;
--
--statedump_error:
-- /* If a statedump error occurs, events will not be readable. */
--register_error:
-- kmem_cache_free(event_cache, event);
--cache_error:
--exist:
--full:
-- mutex_unlock(&sessions_mutex);
-- return NULL;
--}
--
--/*
-- * Only used internally at session destruction.
-- */
--int _ltt_event_unregister(struct ltt_event *event)
--{
-- int ret = -EINVAL;
--
-- switch (event->instrumentation) {
-- case LTTNG_KERNEL_TRACEPOINT:
-- ret = tracepoint_probe_unregister(event->desc->name,
-- event->desc->probe_callback,
-- event);
-- if (ret)
-- return ret;
-- break;
-- case LTTNG_KERNEL_KPROBE:
-- lttng_kprobes_unregister(event);
-- ret = 0;
-- break;
-- case LTTNG_KERNEL_KRETPROBE:
-- lttng_kretprobes_unregister(event);
-- ret = 0;
-- break;
-- case LTTNG_KERNEL_FUNCTION:
-- lttng_ftrace_unregister(event);
-- ret = 0;
-- break;
-- case LTTNG_KERNEL_NOOP:
-- ret = 0;
-- break;
-- default:
-- WARN_ON_ONCE(1);
-- }
-- return ret;
--}
--
--/*
-- * Only used internally at session destruction.
-- */
--static
--void _ltt_event_destroy(struct ltt_event *event)
--{
-- switch (event->instrumentation) {
-- case LTTNG_KERNEL_TRACEPOINT:
-- ltt_event_put(event->desc);
-- break;
-- case LTTNG_KERNEL_KPROBE:
-- module_put(event->desc->owner);
-- lttng_kprobes_destroy_private(event);
-- break;
-- case LTTNG_KERNEL_KRETPROBE:
-- module_put(event->desc->owner);
-- lttng_kretprobes_destroy_private(event);
-- break;
-- case LTTNG_KERNEL_FUNCTION:
-- module_put(event->desc->owner);
-- lttng_ftrace_destroy_private(event);
-- break;
-- case LTTNG_KERNEL_NOOP:
-- break;
-- default:
-- WARN_ON_ONCE(1);
-- }
-- list_del(&event->list);
-- lttng_destroy_context(event->ctx);
-- kmem_cache_free(event_cache, event);
--}
--
--/*
-- * We have exclusive access to our metadata buffer (protected by the
-- * sessions_mutex), so we can do racy operations such as checking the
-- * space remaining in the packet and then writing, since mutual exclusion
-- * protects us from concurrent writes.
-- */
--int lttng_metadata_printf(struct ltt_session *session,
-- const char *fmt, ...)
--{
-- struct lib_ring_buffer_ctx ctx;
-- struct ltt_channel *chan = session->metadata;
-- char *str;
-- int ret = 0, waitret;
-- size_t len, reserve_len, pos;
-- va_list ap;
--
-- WARN_ON_ONCE(!ACCESS_ONCE(session->active));
--
-- va_start(ap, fmt);
-- str = kvasprintf(GFP_KERNEL, fmt, ap);
-- va_end(ap);
-- if (!str)
-- return -ENOMEM;
--
-- len = strlen(str);
-- pos = 0;
--
-- for (pos = 0; pos < len; pos += reserve_len) {
-- reserve_len = min_t(size_t,
-- chan->ops->packet_avail_size(chan->chan),
-- len - pos);
-- lib_ring_buffer_ctx_init(&ctx, chan->chan, NULL, reserve_len,
-- sizeof(char), -1);
-- /*
-- * We don't care about the metadata buffer's lost-records
-- * count, because we always retry here. Report an error if
-- * we need to bail out after a timeout or after being
-- * interrupted.
-- */
-- waitret = wait_event_interruptible_timeout(*chan->ops->get_writer_buf_wait_queue(chan->chan, -1),
-- ({
-- ret = chan->ops->event_reserve(&ctx, 0);
-- ret != -ENOBUFS || !ret;
-- }),
-- msecs_to_jiffies(LTTNG_METADATA_TIMEOUT_MSEC));
-- if (!waitret || waitret == -ERESTARTSYS || ret) {
-- printk(KERN_WARNING "LTTng: Failure to write metadata to buffers (%s)\n",
-- waitret == -ERESTARTSYS ? "interrupted" :
-- (ret == -ENOBUFS ? "timeout" : "I/O error"));
-- if (waitret == -ERESTARTSYS)
-- ret = waitret;
-- goto end;
-- }
-- chan->ops->event_write(&ctx, &str[pos], reserve_len);
-- chan->ops->event_commit(&ctx);
-- }
--end:
-- kfree(str);
-- return ret;
--}
--
--static
--int _ltt_field_statedump(struct ltt_session *session,
-- const struct lttng_event_field *field)
--{
-- int ret = 0;
--
-- switch (field->type.atype) {
-- case atype_integer:
-- ret = lttng_metadata_printf(session,
-- " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
-- field->type.u.basic.integer.size,
-- field->type.u.basic.integer.alignment,
-- field->type.u.basic.integer.signedness,
-- (field->type.u.basic.integer.encoding == lttng_encode_none)
-- ? "none"
-- : (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
-- ? "UTF8"
-- : "ASCII",
-- field->type.u.basic.integer.base,
--#ifdef __BIG_ENDIAN
-- field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
--#else
-- field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
--#endif
-- field->name);
-- break;
-- case atype_enum:
-- ret = lttng_metadata_printf(session,
-- " %s _%s;\n",
-- field->type.u.basic.enumeration.name,
-- field->name);
-- break;
-- case atype_array:
-- {
-- const struct lttng_basic_type *elem_type;
--
-- elem_type = &field->type.u.array.elem_type;
-- ret = lttng_metadata_printf(session,
-- " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
-- elem_type->u.basic.integer.size,
-- elem_type->u.basic.integer.alignment,
-- elem_type->u.basic.integer.signedness,
-- (elem_type->u.basic.integer.encoding == lttng_encode_none)
-- ? "none"
-- : (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
-- ? "UTF8"
-- : "ASCII",
-- elem_type->u.basic.integer.base,
--#ifdef __BIG_ENDIAN
-- elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
--#else
-- elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
--#endif
-- field->name, field->type.u.array.length);
-- break;
-- }
-- case atype_sequence:
-- {
-- const struct lttng_basic_type *elem_type;
-- const struct lttng_basic_type *length_type;
--
-- elem_type = &field->type.u.sequence.elem_type;
-- length_type = &field->type.u.sequence.length_type;
-- ret = lttng_metadata_printf(session,
-- " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
-- length_type->u.basic.integer.size,
-- (unsigned int) length_type->u.basic.integer.alignment,
-- length_type->u.basic.integer.signedness,
-- (length_type->u.basic.integer.encoding == lttng_encode_none)
-- ? "none"
-- : ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
-- ? "UTF8"
-- : "ASCII"),
-- length_type->u.basic.integer.base,
--#ifdef __BIG_ENDIAN
-- length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
--#else
-- length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
--#endif
-- field->name);
-- if (ret)
-- return ret;
--
-- ret = lttng_metadata_printf(session,
-- " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
-- elem_type->u.basic.integer.size,
-- (unsigned int) elem_type->u.basic.integer.alignment,
-- elem_type->u.basic.integer.signedness,
-- (elem_type->u.basic.integer.encoding == lttng_encode_none)
-- ? "none"
-- : ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
-- ? "UTF8"
-- : "ASCII"),
-- elem_type->u.basic.integer.base,
--#ifdef __BIG_ENDIAN
-- elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
--#else
-- elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
--#endif
-- field->name,
-- field->name);
-- break;
-- }
--
-- case atype_string:
-- /* Default encoding is UTF8 */
-- ret = lttng_metadata_printf(session,
-- " string%s _%s;\n",
-- field->type.u.basic.string.encoding == lttng_encode_ASCII ?
-- " { encoding = ASCII; }" : "",
-- field->name);
-- break;
-- default:
-- WARN_ON_ONCE(1);
-- return -EINVAL;
-- }
-- return ret;
--}
--
--static
--int _ltt_context_metadata_statedump(struct ltt_session *session,
-- struct lttng_ctx *ctx)
--{
-- int ret = 0;
-- int i;
--
-- if (!ctx)
-- return 0;
-- for (i = 0; i < ctx->nr_fields; i++) {
-- const struct lttng_ctx_field *field = &ctx->fields[i];
--
-- ret = _ltt_field_statedump(session, &field->event_field);
-- if (ret)
-- return ret;
-- }
-- return ret;
--}
--
--static
--int _ltt_fields_metadata_statedump(struct ltt_session *session,
-- struct ltt_event *event)
--{
-- const struct lttng_event_desc *desc = event->desc;
-- int ret = 0;
-- int i;
--
-- for (i = 0; i < desc->nr_fields; i++) {
-- const struct lttng_event_field *field = &desc->fields[i];
--
-- ret = _ltt_field_statedump(session, field);
-- if (ret)
-- return ret;
-- }
-- return ret;
--}
--
--static
--int _ltt_event_metadata_statedump(struct ltt_session *session,
-- struct ltt_channel *chan,
-- struct ltt_event *event)
--{
-- int ret = 0;
--
-- if (event->metadata_dumped || !ACCESS_ONCE(session->active))
-- return 0;
-- if (chan == session->metadata)
-- return 0;
--
-- ret = lttng_metadata_printf(session,
-- "event {\n"
-- " name = %s;\n"
-- " id = %u;\n"
-- " stream_id = %u;\n",
-- event->desc->name,
-- event->id,
-- event->chan->id);
-- if (ret)
-- goto end;
--
-- if (event->ctx) {
-- ret = lttng_metadata_printf(session,
-- " context := struct {\n");
-- if (ret)
-- goto end;
-- }
-- ret = _ltt_context_metadata_statedump(session, event->ctx);
-- if (ret)
-- goto end;
-- if (event->ctx) {
-- ret = lttng_metadata_printf(session,
-- " };\n");
-- if (ret)
-- goto end;
-- }
--
-- ret = lttng_metadata_printf(session,
-- " fields := struct {\n"
-- );
-- if (ret)
-- goto end;
--
-- ret = _ltt_fields_metadata_statedump(session, event);
-- if (ret)
-- goto end;
--
-- /*
-- * LTTng space reservation can only reserve multiples of the
-- * byte size.
-- */
-- ret = lttng_metadata_printf(session,
-- " };\n"
-- "};\n\n");
-- if (ret)
-- goto end;
--
-- event->metadata_dumped = 1;
--end:
-- return ret;
--
--}
--
--static
--int _ltt_channel_metadata_statedump(struct ltt_session *session,
-- struct ltt_channel *chan)
--{
-- int ret = 0;
--
-- if (chan->metadata_dumped || !ACCESS_ONCE(session->active))
-- return 0;
-- if (chan == session->metadata)
-- return 0;
--
-- WARN_ON_ONCE(!chan->header_type);
-- ret = lttng_metadata_printf(session,
-- "stream {\n"
-- " id = %u;\n"
-- " event.header := %s;\n"
-- " packet.context := struct packet_context;\n",
-- chan->id,
-- chan->header_type == 1 ? "struct event_header_compact" :
-- "struct event_header_large");
-- if (ret)
-- goto end;
--
-- if (chan->ctx) {
-- ret = lttng_metadata_printf(session,
-- " event.context := struct {\n");
-- if (ret)
-- goto end;
-- }
-- ret = _ltt_context_metadata_statedump(session, chan->ctx);
-- if (ret)
-- goto end;
-- if (chan->ctx) {
-- ret = lttng_metadata_printf(session,
-- " };\n");
-- if (ret)
-- goto end;
-- }
--
-- ret = lttng_metadata_printf(session,
-- "};\n\n");
--
-- chan->metadata_dumped = 1;
--end:
-- return ret;
--}
--
--static
--int _ltt_stream_packet_context_declare(struct ltt_session *session)
--{
-- return lttng_metadata_printf(session,
-- "struct packet_context {\n"
-- " uint64_t timestamp_begin;\n"
-- " uint64_t timestamp_end;\n"
-- " uint32_t events_discarded;\n"
-- " uint32_t content_size;\n"
-- " uint32_t packet_size;\n"
-- " uint32_t cpu_id;\n"
-- "};\n\n"
-- );
--}
--
--/*
-- * Compact header:
-- * id: range: 0 - 30.
-- * id 31 is reserved to indicate an extended header.
-- *
-- * Large header:
-- * id: range: 0 - 65534.
-- * id 65535 is reserved to indicate an extended header.
-- */
--static
--int _ltt_event_header_declare(struct ltt_session *session)
--{
-- return lttng_metadata_printf(session,
-- "struct event_header_compact {\n"
-- " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
-- " variant <id> {\n"
-- " struct {\n"
-- " uint27_t timestamp;\n"
-- " } compact;\n"
-- " struct {\n"
-- " uint32_t id;\n"
-- " uint64_t timestamp;\n"
-- " } extended;\n"
-- " } v;\n"
-- "} align(%u);\n"
-- "\n"
-- "struct event_header_large {\n"
-- " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
-- " variant <id> {\n"
-- " struct {\n"
-- " uint32_t timestamp;\n"
-- " } compact;\n"
-- " struct {\n"
-- " uint32_t id;\n"
-- " uint64_t timestamp;\n"
-- " } extended;\n"
-- " } v;\n"
-- "} align(%u);\n\n",
-- ltt_alignof(uint32_t) * CHAR_BIT,
-- ltt_alignof(uint16_t) * CHAR_BIT
-- );
--}
--
--/*
-- * Output metadata into this session's metadata buffers.
-- */
--static
--int _ltt_session_metadata_statedump(struct ltt_session *session)
--{
-- unsigned char *uuid_c = session->uuid.b;
-- unsigned char uuid_s[37];
-- struct ltt_channel *chan;
-- struct ltt_event *event;
-- int ret = 0;
--
-- if (!ACCESS_ONCE(session->active))
-- return 0;
-- if (session->metadata_dumped)
-- goto skip_session;
-- if (!session->metadata) {
-- printk(KERN_WARNING "LTTng: attempt to start tracing, but metadata channel is not found. Operation abort.\n");
-- return -EPERM;
-- }
--
-- snprintf(uuid_s, sizeof(uuid_s),
-- "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
-- uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
-- uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
-- uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
-- uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
--
-- ret = lttng_metadata_printf(session,
-- "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
-- "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
-- "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
-- "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
-- "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
-- "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
-- "\n"
-- "trace {\n"
-- " major = %u;\n"
-- " minor = %u;\n"
-- " uuid = \"%s\";\n"
-- " byte_order = %s;\n"
-- " packet.header := struct {\n"
-- " uint32_t magic;\n"
-- " uint8_t uuid[16];\n"
-- " uint32_t stream_id;\n"
-- " };\n"
-- "};\n\n",
-- ltt_alignof(uint8_t) * CHAR_BIT,
-- ltt_alignof(uint16_t) * CHAR_BIT,
-- ltt_alignof(uint32_t) * CHAR_BIT,
-- ltt_alignof(uint64_t) * CHAR_BIT,
-- CTF_VERSION_MAJOR,
-- CTF_VERSION_MINOR,
-- uuid_s,
--#ifdef __BIG_ENDIAN
-- "be"
--#else
-- "le"
--#endif
-- );
-- if (ret)
-- goto end;
--
-- ret = _ltt_stream_packet_context_declare(session);
-- if (ret)
-- goto end;
--
-- ret = _ltt_event_header_declare(session);
-- if (ret)
-- goto end;
--
--skip_session:
-- list_for_each_entry(chan, &session->chan, list) {
-- ret = _ltt_channel_metadata_statedump(session, chan);
-- if (ret)
-- goto end;
-- }
--
-- list_for_each_entry(event, &session->events, list) {
-- ret = _ltt_event_metadata_statedump(session, event->chan, event);
-- if (ret)
-- goto end;
-- }
-- session->metadata_dumped = 1;
--end:
-- return ret;
--}
--
--/**
-- * ltt_transport_register - LTT transport registration
-- * @transport: transport structure
-- *
-- * Registers a transport which can be used as output to extract the data out of
-- * LTTng. The module calling this registration function must ensure that no
-- * trap-inducing code will be executed by the transport functions. E.g.
-- * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
-- * is made visible to the transport function. This registration acts as a
-- * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
-- * after its registration must it synchronize the TLBs.
-- */
--void ltt_transport_register(struct ltt_transport *transport)
--{
-- /*
-- * Make sure no page fault can be triggered by the module about to be
-- * registered. We deal with this here so we don't have to call
-- * vmalloc_sync_all() in each module's init.
-- */
-- wrapper_vmalloc_sync_all();
--
-- mutex_lock(&sessions_mutex);
-- list_add_tail(&transport->node, &ltt_transport_list);
-- mutex_unlock(&sessions_mutex);
--}
--EXPORT_SYMBOL_GPL(ltt_transport_register);
--
--/**
-- * ltt_transport_unregister - LTT transport unregistration
-- * @transport: transport structure
-- */
--void ltt_transport_unregister(struct ltt_transport *transport)
--{
-- mutex_lock(&sessions_mutex);
-- list_del(&transport->node);
-- mutex_unlock(&sessions_mutex);
--}
--EXPORT_SYMBOL_GPL(ltt_transport_unregister);
--
--static int __init ltt_events_init(void)
--{
-- int ret;
--
-- event_cache = KMEM_CACHE(ltt_event, 0);
-- if (!event_cache)
-- return -ENOMEM;
-- ret = ltt_debugfs_abi_init();
-- if (ret)
-- goto error_abi;
-- return 0;
--error_abi:
-- kmem_cache_destroy(event_cache);
-- return ret;
--}
--
--module_init(ltt_events_init);
--
--static void __exit ltt_events_exit(void)
--{
-- struct ltt_session *session, *tmpsession;
--
-- ltt_debugfs_abi_exit();
-- list_for_each_entry_safe(session, tmpsession, &sessions, list)
-- ltt_session_destroy(session);
-- kmem_cache_destroy(event_cache);
--}
--
--module_exit(ltt_events_exit);
--
--MODULE_LICENSE("GPL and additional rights");
--MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
--MODULE_DESCRIPTION("LTTng Events");
---- a/drivers/staging/lttng/ltt-events.h
-+++ /dev/null
-@@ -1,452 +0,0 @@
--#ifndef _LTT_EVENTS_H
--#define _LTT_EVENTS_H
--
--/*
-- * ltt-events.h
-- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * Holds LTTng per-session event registry.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/list.h>
--#include <linux/kprobes.h>
--#include "wrapper/uuid.h"
--#include "ltt-debugfs-abi.h"
--
--#undef is_signed_type
--#define is_signed_type(type) (((type)(-1)) < 0)
--
--struct ltt_channel;
--struct ltt_session;
--struct lib_ring_buffer_ctx;
--struct perf_event;
--struct perf_event_attr;
--
--/* Type description */
--
--/* Update the abstract_types name table in lttng-types.c along with this enum */
--enum abstract_types {
-- atype_integer,
-- atype_enum,
-- atype_array,
-- atype_sequence,
-- atype_string,
-- NR_ABSTRACT_TYPES,
--};
--
--/* Update the string_encodings name table in lttng-types.c along with this enum */
--enum lttng_string_encodings {
-- lttng_encode_none = 0,
-- lttng_encode_UTF8 = 1,
-- lttng_encode_ASCII = 2,
-- NR_STRING_ENCODINGS,
--};
--
--struct lttng_enum_entry {
-- unsigned long long start, end; /* start and end are inclusive */
-- const char *string;
--};
--
--#define __type_integer(_type, _byte_order, _base, _encoding) \
-- { \
-- .atype = atype_integer, \
-- .u.basic.integer = \
-- { \
-- .size = sizeof(_type) * CHAR_BIT, \
-- .alignment = ltt_alignof(_type) * CHAR_BIT, \
-- .signedness = is_signed_type(_type), \
-- .reverse_byte_order = _byte_order != __BYTE_ORDER, \
-- .base = _base, \
-- .encoding = lttng_encode_##_encoding, \
-- }, \
-- } \
--
--struct lttng_integer_type {
-- unsigned int size; /* in bits */
-- unsigned short alignment; /* in bits */
-- uint signedness:1;
-- uint reverse_byte_order:1;
-- unsigned int base; /* 2, 8, 10, 16, for pretty print */
-- enum lttng_string_encodings encoding;
--};
--
--union _lttng_basic_type {
-- struct lttng_integer_type integer;
-- struct {
-- const char *name;
-- } enumeration;
-- struct {
-- enum lttng_string_encodings encoding;
-- } string;
--};
--
--struct lttng_basic_type {
-- enum abstract_types atype;
-- union {
-- union _lttng_basic_type basic;
-- } u;
--};
--
--struct lttng_type {
-- enum abstract_types atype;
-- union {
-- union _lttng_basic_type basic;
-- struct {
-- struct lttng_basic_type elem_type;
-- unsigned int length; /* num. elems. */
-- } array;
-- struct {
-- struct lttng_basic_type length_type;
-- struct lttng_basic_type elem_type;
-- } sequence;
-- } u;
--};
--
--struct lttng_enum {
-- const char *name;
-- struct lttng_type container_type;
-- const struct lttng_enum_entry *entries;
-- unsigned int len;
--};
--
--/* Event field description */
--
--struct lttng_event_field {
-- const char *name;
-- struct lttng_type type;
--};
--
--/*
-- * We need to keep this perf counter field separate from struct
-- * lttng_ctx_field because cpu hotplug needs fixed-location addresses.
-- */
--struct lttng_perf_counter_field {
-- struct notifier_block nb;
-- int hp_enable;
-- struct perf_event_attr *attr;
-- struct perf_event **e; /* per-cpu array */
--};
--
--struct lttng_ctx_field {
-- struct lttng_event_field event_field;
-- size_t (*get_size)(size_t offset);
-- void (*record)(struct lttng_ctx_field *field,
-- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan);
-- union {
-- struct lttng_perf_counter_field *perf_counter;
-- } u;
-- void (*destroy)(struct lttng_ctx_field *field);
--};
--
--struct lttng_ctx {
-- struct lttng_ctx_field *fields;
-- unsigned int nr_fields;
-- unsigned int allocated_fields;
--};
--
--struct lttng_event_desc {
-- const char *name;
-- void *probe_callback;
-- const struct lttng_event_ctx *ctx; /* context */
-- const struct lttng_event_field *fields; /* event payload */
-- unsigned int nr_fields;
-- struct module *owner;
--};
--
--struct lttng_probe_desc {
-- const struct lttng_event_desc **event_desc;
-- unsigned int nr_events;
-- struct list_head head; /* chain registered probes */
--};
--
--struct lttng_krp; /* Kretprobe handling */
--
--/*
-- * ltt_event structure is referred to by the tracing fast path. It must be
-- * kept small.
-- */
--struct ltt_event {
-- unsigned int id;
-- struct ltt_channel *chan;
-- int enabled;
-- const struct lttng_event_desc *desc;
-- void *filter;
-- struct lttng_ctx *ctx;
-- enum lttng_kernel_instrumentation instrumentation;
-- union {
-- struct {
-- struct kprobe kp;
-- char *symbol_name;
-- } kprobe;
-- struct {
-- struct lttng_krp *lttng_krp;
-- char *symbol_name;
-- } kretprobe;
-- struct {
-- char *symbol_name;
-- } ftrace;
-- } u;
-- struct list_head list; /* Event list */
-- uint metadata_dumped:1;
--};
--
--struct ltt_channel_ops {
-- struct channel *(*channel_create)(const char *name,
-- struct ltt_channel *ltt_chan,
-- void *buf_addr,
-- size_t subbuf_size, size_t num_subbuf,
-- unsigned int switch_timer_interval,
-- unsigned int read_timer_interval);
-- void (*channel_destroy)(struct channel *chan);
-- struct lib_ring_buffer *(*buffer_read_open)(struct channel *chan);
-- int (*buffer_has_read_closed_stream)(struct channel *chan);
-- void (*buffer_read_close)(struct lib_ring_buffer *buf);
-- int (*event_reserve)(struct lib_ring_buffer_ctx *ctx,
-- uint32_t event_id);
-- void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
-- void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
-- size_t len);
-- void (*event_write_from_user)(struct lib_ring_buffer_ctx *ctx,
-- const void *src, size_t len);
-- void (*event_memset)(struct lib_ring_buffer_ctx *ctx,
-- int c, size_t len);
-- /*
-- * packet_avail_size returns the available size in the current
-- * packet. Note that the size returned is only a hint, since it
-- * may change due to concurrent writes.
-- */
-- size_t (*packet_avail_size)(struct channel *chan);
-- wait_queue_head_t *(*get_writer_buf_wait_queue)(struct channel *chan, int cpu);
-- wait_queue_head_t *(*get_hp_wait_queue)(struct channel *chan);
-- int (*is_finalized)(struct channel *chan);
-- int (*is_disabled)(struct channel *chan);
--};
--
--struct ltt_transport {
-- char *name;
-- struct module *owner;
-- struct list_head node;
-- struct ltt_channel_ops ops;
--};
--
--struct ltt_channel {
-- unsigned int id;
-- struct channel *chan; /* Channel buffers */
-- int enabled;
-- struct lttng_ctx *ctx;
-- /* Event ID management */
-- struct ltt_session *session;
-- struct file *file; /* File associated to channel */
-- unsigned int free_event_id; /* Next event ID to allocate */
-- struct list_head list; /* Channel list */
-- struct ltt_channel_ops *ops;
-- struct ltt_transport *transport;
-- struct ltt_event **sc_table; /* for syscall tracing */
-- struct ltt_event **compat_sc_table;
-- struct ltt_event *sc_unknown; /* for unknown syscalls */
-- struct ltt_event *sc_compat_unknown;
-- struct ltt_event *sc_exit; /* for syscall exit */
-- int header_type; /* 0: unset, 1: compact, 2: large */
-- uint metadata_dumped:1;
--};
--
--struct ltt_session {
-- int active; /* Is trace session active ? */
-- int been_active; /* Has trace session been active ? */
-- struct file *file; /* File associated to session */
-- struct ltt_channel *metadata; /* Metadata channel */
-- struct list_head chan; /* Channel list head */
-- struct list_head events; /* Event list head */
-- struct list_head list; /* Session list */
-- unsigned int free_chan_id; /* Next chan ID to allocate */
-- uuid_le uuid; /* Trace session unique ID */
-- uint metadata_dumped:1;
--};
--
--struct ltt_session *ltt_session_create(void);
--int ltt_session_enable(struct ltt_session *session);
--int ltt_session_disable(struct ltt_session *session);
--void ltt_session_destroy(struct ltt_session *session);
--
--struct ltt_channel *ltt_channel_create(struct ltt_session *session,
-- const char *transport_name,
-- void *buf_addr,
-- size_t subbuf_size, size_t num_subbuf,
-- unsigned int switch_timer_interval,
-- unsigned int read_timer_interval);
--struct ltt_channel *ltt_global_channel_create(struct ltt_session *session,
-- int overwrite, void *buf_addr,
-- size_t subbuf_size, size_t num_subbuf,
-- unsigned int switch_timer_interval,
-- unsigned int read_timer_interval);
--
--struct ltt_event *ltt_event_create(struct ltt_channel *chan,
-- struct lttng_kernel_event *event_param,
-- void *filter,
-- const struct lttng_event_desc *internal_desc);
--
--int ltt_channel_enable(struct ltt_channel *channel);
--int ltt_channel_disable(struct ltt_channel *channel);
--int ltt_event_enable(struct ltt_event *event);
--int ltt_event_disable(struct ltt_event *event);
--
--void ltt_transport_register(struct ltt_transport *transport);
--void ltt_transport_unregister(struct ltt_transport *transport);
--
--void synchronize_trace(void);
--int ltt_debugfs_abi_init(void);
--void ltt_debugfs_abi_exit(void);
--
--int ltt_probe_register(struct lttng_probe_desc *desc);
--void ltt_probe_unregister(struct lttng_probe_desc *desc);
--const struct lttng_event_desc *ltt_event_get(const char *name);
--void ltt_event_put(const struct lttng_event_desc *desc);
--int ltt_probes_init(void);
--void ltt_probes_exit(void);
--
--#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
--int lttng_syscalls_register(struct ltt_channel *chan, void *filter);
--int lttng_syscalls_unregister(struct ltt_channel *chan);
--#else
--static inline int lttng_syscalls_register(struct ltt_channel *chan, void *filter)
--{
-- return -ENOSYS;
--}
--
--static inline int lttng_syscalls_unregister(struct ltt_channel *chan)
--{
-- return 0;
--}
--#endif
--
--struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx);
--int lttng_find_context(struct lttng_ctx *ctx, const char *name);
--void lttng_remove_context_field(struct lttng_ctx **ctx,
-- struct lttng_ctx_field *field);
--void lttng_destroy_context(struct lttng_ctx *ctx);
--int lttng_add_pid_to_ctx(struct lttng_ctx **ctx);
--int lttng_add_procname_to_ctx(struct lttng_ctx **ctx);
--int lttng_add_prio_to_ctx(struct lttng_ctx **ctx);
--int lttng_add_nice_to_ctx(struct lttng_ctx **ctx);
--int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx);
--int lttng_add_tid_to_ctx(struct lttng_ctx **ctx);
--int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx);
--int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx);
--int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx);
--#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
--int lttng_add_perf_counter_to_ctx(uint32_t type,
-- uint64_t config,
-- const char *name,
-- struct lttng_ctx **ctx);
--#else
--static inline
--int lttng_add_perf_counter_to_ctx(uint32_t type,
-- uint64_t config,
-- const char *name,
-- struct lttng_ctx **ctx)
--{
-- return -ENOSYS;
--}
--#endif
--
--#ifdef CONFIG_KPROBES
--int lttng_kprobes_register(const char *name,
-- const char *symbol_name,
-- uint64_t offset,
-- uint64_t addr,
-- struct ltt_event *event);
--void lttng_kprobes_unregister(struct ltt_event *event);
--void lttng_kprobes_destroy_private(struct ltt_event *event);
--#else
--static inline
--int lttng_kprobes_register(const char *name,
-- const char *symbol_name,
-- uint64_t offset,
-- uint64_t addr,
-- struct ltt_event *event)
--{
-- return -ENOSYS;
--}
--
--static inline
--void lttng_kprobes_unregister(struct ltt_event *event)
--{
--}
--
--static inline
--void lttng_kprobes_destroy_private(struct ltt_event *event)
--{
--}
--#endif
--
--#ifdef CONFIG_KRETPROBES
--int lttng_kretprobes_register(const char *name,
-- const char *symbol_name,
-- uint64_t offset,
-- uint64_t addr,
-- struct ltt_event *event_entry,
-- struct ltt_event *event_exit);
--void lttng_kretprobes_unregister(struct ltt_event *event);
--void lttng_kretprobes_destroy_private(struct ltt_event *event);
--#else
--static inline
--int lttng_kretprobes_register(const char *name,
-- const char *symbol_name,
-- uint64_t offset,
-- uint64_t addr,
-- struct ltt_event *event_entry,
-- struct ltt_event *event_exit)
--{
-- return -ENOSYS;
--}
--
--static inline
--void lttng_kretprobes_unregister(struct ltt_event *event)
--{
--}
--
--static inline
--void lttng_kretprobes_destroy_private(struct ltt_event *event)
--{
--}
--#endif
--
--#ifdef CONFIG_DYNAMIC_FTRACE
--int lttng_ftrace_register(const char *name,
-- const char *symbol_name,
-- struct ltt_event *event);
--void lttng_ftrace_unregister(struct ltt_event *event);
--void lttng_ftrace_destroy_private(struct ltt_event *event);
--#else
--static inline
--int lttng_ftrace_register(const char *name,
-- const char *symbol_name,
-- struct ltt_event *event)
--{
-- return -ENOSYS;
--}
--
--static inline
--void lttng_ftrace_unregister(struct ltt_event *event)
--{
--}
--
--static inline
--void lttng_ftrace_destroy_private(struct ltt_event *event)
--{
--}
--#endif
--
--int lttng_calibrate(struct lttng_kernel_calibrate *calibrate);
--
--extern const struct file_operations lttng_tracepoint_list_fops;
--
--#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
--#define TRACEPOINT_HAS_DATA_ARG
--#endif
--
--#endif /* _LTT_EVENTS_H */
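
The header removed above declares the whole pre-rename tracer API: sessions own channels, channels own events, and each object has create/enable/disable/destroy entry points. The following is an illustrative in-kernel sketch of that lifecycle built only from the declarations above; struct lttng_kernel_event and LTTNG_KERNEL_TRACEPOINT are assumed to come from the ABI headers that ltt-events.h relies on, the "relay-discard" transport name is taken from the ABI code later in this patch, and the event name and buffer sizing are placeholders.

/*
 * Illustrative sketch only: session -> channel -> event -> enable,
 * using the ltt_* declarations from ltt-events.h above.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include "ltt-events.h"

static struct ltt_session *demo_session;

static int __init demo_trace_init(void)
{
	struct ltt_channel *chan;
	struct ltt_event *event;
	struct lttng_kernel_event ev_param = {
		.instrumentation = LTTNG_KERNEL_TRACEPOINT,
		.name = "sched_switch",		/* placeholder event name */
	};

	demo_session = ltt_session_create();
	if (!demo_session)
		return -ENOMEM;

	/* 16 sub-buffers of 4 kB, no switch/read timers (placeholders). */
	chan = ltt_channel_create(demo_session, "relay-discard", NULL,
				  4096, 16, 0, 0);
	if (!chan)
		goto error;

	event = ltt_event_create(chan, &ev_param, NULL, NULL);
	if (!event)
		goto error;

	/* Corresponds to the ABI's weak session enable. */
	return ltt_session_enable(demo_session);

error:
	ltt_session_destroy(demo_session);
	return -EINVAL;
}

static void __exit demo_trace_exit(void)
{
	ltt_session_destroy(demo_session);
}

module_init(demo_trace_init);
module_exit(demo_trace_exit);
MODULE_LICENSE("GPL");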
---- a/drivers/staging/lttng/ltt-probes.c
-+++ /dev/null
-@@ -1,164 +0,0 @@
--/*
-- * ltt-probes.c
-- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * Holds LTTng probes registry.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include <linux/list.h>
--#include <linux/mutex.h>
--#include <linux/seq_file.h>
--
--#include "ltt-events.h"
--
--static LIST_HEAD(probe_list);
--static DEFINE_MUTEX(probe_mutex);
--
--static
--const struct lttng_event_desc *find_event(const char *name)
--{
-- struct lttng_probe_desc *probe_desc;
-- int i;
--
-- list_for_each_entry(probe_desc, &probe_list, head) {
-- for (i = 0; i < probe_desc->nr_events; i++) {
-- if (!strcmp(probe_desc->event_desc[i]->name, name))
-- return probe_desc->event_desc[i];
-- }
-- }
-- return NULL;
--}
--
--int ltt_probe_register(struct lttng_probe_desc *desc)
--{
-- int ret = 0;
-- int i;
--
-- mutex_lock(&probe_mutex);
-- /*
-- * TODO: This is O(N^2). Turn into a hash table when probe registration
-- * overhead becomes an issue.
-- */
-- for (i = 0; i < desc->nr_events; i++) {
-- if (find_event(desc->event_desc[i]->name)) {
-- ret = -EEXIST;
-- goto end;
-- }
-- }
-- list_add(&desc->head, &probe_list);
--end:
-- mutex_unlock(&probe_mutex);
-- return ret;
--}
--EXPORT_SYMBOL_GPL(ltt_probe_register);
--
--void ltt_probe_unregister(struct lttng_probe_desc *desc)
--{
-- mutex_lock(&probe_mutex);
-- list_del(&desc->head);
-- mutex_unlock(&probe_mutex);
--}
--EXPORT_SYMBOL_GPL(ltt_probe_unregister);
--
--const struct lttng_event_desc *ltt_event_get(const char *name)
--{
-- const struct lttng_event_desc *event;
-- int ret;
--
-- mutex_lock(&probe_mutex);
-- event = find_event(name);
-- mutex_unlock(&probe_mutex);
-- if (!event)
-- return NULL;
-- ret = try_module_get(event->owner);
-- WARN_ON_ONCE(!ret);
-- return event;
--}
--EXPORT_SYMBOL_GPL(ltt_event_get);
--
--void ltt_event_put(const struct lttng_event_desc *event)
--{
-- module_put(event->owner);
--}
--EXPORT_SYMBOL_GPL(ltt_event_put);
--
--static
--void *tp_list_start(struct seq_file *m, loff_t *pos)
--{
-- struct lttng_probe_desc *probe_desc;
-- int iter = 0, i;
--
-- mutex_lock(&probe_mutex);
-- list_for_each_entry(probe_desc, &probe_list, head) {
-- for (i = 0; i < probe_desc->nr_events; i++) {
-- if (iter++ >= *pos)
-- return (void *) probe_desc->event_desc[i];
-- }
-- }
-- /* End of list */
-- return NULL;
--}
--
--static
--void *tp_list_next(struct seq_file *m, void *p, loff_t *ppos)
--{
-- struct lttng_probe_desc *probe_desc;
-- int iter = 0, i;
--
-- (*ppos)++;
-- list_for_each_entry(probe_desc, &probe_list, head) {
-- for (i = 0; i < probe_desc->nr_events; i++) {
-- if (iter++ >= *ppos)
-- return (void *) probe_desc->event_desc[i];
-- }
-- }
-- /* End of list */
-- return NULL;
--}
--
--static
--void tp_list_stop(struct seq_file *m, void *p)
--{
-- mutex_unlock(&probe_mutex);
--}
--
--static
--int tp_list_show(struct seq_file *m, void *p)
--{
-- const struct lttng_event_desc *probe_desc = p;
--
-- /*
-- * Don't export lttng internal events (metadata).
-- */
-- if (!strncmp(probe_desc->name, "lttng_", sizeof("lttng_") - 1))
-- return 0;
-- seq_printf(m, "event { name = %s; };\n",
-- probe_desc->name);
-- return 0;
--}
--
--static
--const struct seq_operations lttng_tracepoint_list_seq_ops = {
-- .start = tp_list_start,
-- .next = tp_list_next,
-- .stop = tp_list_stop,
-- .show = tp_list_show,
--};
--
--static
--int lttng_tracepoint_list_open(struct inode *inode, struct file *file)
--{
-- return seq_open(file, &lttng_tracepoint_list_seq_ops);
--}
--
--const struct file_operations lttng_tracepoint_list_fops = {
-- .owner = THIS_MODULE,
-- .open = lttng_tracepoint_list_open,
-- .read = seq_read,
-- .llseek = seq_lseek,
-- .release = seq_release,
--};
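
ltt-probes.c above keeps a flat, mutex-protected registry of event descriptors, looked up by name. A hedged sketch of the provider side follows, assuming only the lttng_probe_desc/lttng_event_desc fields actually dereferenced above (event_desc[], nr_events, name, owner); real descriptors carry additional metadata, and "demo_event" is an invented name.

/*
 * Illustrative probe provider registering with the registry above.
 * Only fields used by ltt-probes.c are initialized.
 */
#include <linux/module.h>
#include "ltt-events.h"

static const struct lttng_event_desc demo_event_desc = {
	.name = "demo_event",		/* hypothetical event name */
	.owner = THIS_MODULE,
};

static const struct lttng_event_desc *demo_event_descs[] = {
	&demo_event_desc,
};

static struct lttng_probe_desc demo_probe_desc = {
	.event_desc = demo_event_descs,
	.nr_events = ARRAY_SIZE(demo_event_descs),
};

static int __init demo_probe_init(void)
{
	/* Fails with -EEXIST if "demo_event" is already registered. */
	return ltt_probe_register(&demo_probe_desc);
}

static void __exit demo_probe_exit(void)
{
	ltt_probe_unregister(&demo_probe_desc);
}

module_init(demo_probe_init);
module_exit(demo_probe_exit);
MODULE_LICENSE("GPL");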
---- a/drivers/staging/lttng/ltt-ring-buffer-client-discard.c
-+++ /dev/null
-@@ -1,21 +0,0 @@
--/*
-- * ltt-ring-buffer-client-discard.c
-- *
-- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng lib ring buffer client (discard mode).
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include "ltt-tracer.h"
--
--#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
--#define RING_BUFFER_MODE_TEMPLATE_STRING "discard"
--#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
--#include "ltt-ring-buffer-client.h"
--
--MODULE_LICENSE("GPL and additional rights");
--MODULE_AUTHOR("Mathieu Desnoyers");
--MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");
---- a/drivers/staging/lttng/ltt-ring-buffer-client-mmap-discard.c
-+++ /dev/null
-@@ -1,21 +0,0 @@
--/*
-- * ltt-ring-buffer-client-discard.c
-- *
-- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng lib ring buffer client (discard mode).
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include "ltt-tracer.h"
--
--#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
--#define RING_BUFFER_MODE_TEMPLATE_STRING "discard-mmap"
--#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
--#include "ltt-ring-buffer-client.h"
--
--MODULE_LICENSE("GPL and additional rights");
--MODULE_AUTHOR("Mathieu Desnoyers");
--MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");
---- a/drivers/staging/lttng/ltt-ring-buffer-client-mmap-overwrite.c
-+++ /dev/null
-@@ -1,21 +0,0 @@
--/*
-- * ltt-ring-buffer-client-overwrite.c
-- *
-- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng lib ring buffer client (overwrite mode).
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include "ltt-tracer.h"
--
--#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
--#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite-mmap"
--#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
--#include "ltt-ring-buffer-client.h"
--
--MODULE_LICENSE("GPL and additional rights");
--MODULE_AUTHOR("Mathieu Desnoyers");
--MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");
---- a/drivers/staging/lttng/ltt-ring-buffer-client-overwrite.c
-+++ /dev/null
-@@ -1,21 +0,0 @@
--/*
-- * ltt-ring-buffer-client-overwrite.c
-- *
-- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng lib ring buffer client (overwrite mode).
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include "ltt-tracer.h"
--
--#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
--#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite"
--#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
--#include "ltt-ring-buffer-client.h"
--
--MODULE_LICENSE("GPL and additional rights");
--MODULE_AUTHOR("Mathieu Desnoyers");
--MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");
---- a/drivers/staging/lttng/ltt-ring-buffer-client.h
-+++ /dev/null
-@@ -1,569 +0,0 @@
--/*
-- * ltt-ring-buffer-client.h
-- *
-- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng lib ring buffer client template.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include <linux/types.h>
--#include "lib/bitfield.h"
--#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
--#include "wrapper/trace-clock.h"
--#include "ltt-events.h"
--#include "ltt-tracer.h"
--#include "wrapper/ringbuffer/frontend_types.h"
--
--/*
-- * Keep the natural field alignment for _each field_ within this structure if
-- * you ever add/remove a field from this header. Packed attribute is not used
-- * because gcc generates poor code on at least powerpc and mips. Don't ever
-- * let gcc add padding between the structure elements.
-- *
-- * The guarantee we have with timestamps is that all the events in a
-- * packet are included (inclusive) within the begin/end timestamps of
-- * the packet. Another guarantee we have is that the "timestamp begin",
-- * as well as the event timestamps, are monotonically increasing (never
-- * decrease) when moving forward in a stream (physically). But this
-- * guarantee does not apply to "timestamp end", because it is sampled at
-- * commit time, which is not ordered with respect to space reservation.
-- */
--
--struct packet_header {
-- /* Trace packet header */
-- uint32_t magic; /*
-- * Trace magic number.
-- * contains endianness information.
-- */
-- uint8_t uuid[16];
-- uint32_t stream_id;
--
-- struct {
-- /* Stream packet context */
-- uint64_t timestamp_begin; /* Cycle count at subbuffer start */
-- uint64_t timestamp_end; /* Cycle count at subbuffer end */
-- uint32_t events_discarded; /*
-- * Events lost in this subbuffer since
-- * the beginning of the trace.
-- * (may overflow)
-- */
-- uint32_t content_size; /* Size of data in subbuffer */
-- uint32_t packet_size; /* Subbuffer size (include padding) */
-- uint32_t cpu_id; /* CPU id associated with stream */
-- uint8_t header_end; /* End of header */
-- } ctx;
--};
--
--
--static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
--{
-- return trace_clock_read64();
--}
--
--static inline
--size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
--{
-- int i;
-- size_t orig_offset = offset;
--
-- if (likely(!ctx))
-- return 0;
-- for (i = 0; i < ctx->nr_fields; i++)
-- offset += ctx->fields[i].get_size(offset);
-- return offset - orig_offset;
--}
--
--static inline
--void ctx_record(struct lib_ring_buffer_ctx *bufctx,
-- struct ltt_channel *chan,
-- struct lttng_ctx *ctx)
--{
-- int i;
--
-- if (likely(!ctx))
-- return;
-- for (i = 0; i < ctx->nr_fields; i++)
-- ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
--}
--
--/*
-- * record_header_size - Calculate the header size and padding necessary.
-- * @config: ring buffer instance configuration
-- * @chan: channel
-- * @offset: offset in the write buffer
-- * @pre_header_padding: padding to add before the header (output)
-- * @ctx: reservation context
-- *
-- * Returns the event header size (including padding).
-- *
-- * The payload must itself determine its own alignment from the biggest type it
-- * contains.
-- */
--static __inline__
--unsigned char record_header_size(const struct lib_ring_buffer_config *config,
-- struct channel *chan, size_t offset,
-- size_t *pre_header_padding,
-- struct lib_ring_buffer_ctx *ctx)
--{
-- struct ltt_channel *ltt_chan = channel_get_private(chan);
-- struct ltt_event *event = ctx->priv;
-- size_t orig_offset = offset;
-- size_t padding;
--
-- switch (ltt_chan->header_type) {
-- case 1: /* compact */
-- padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
-- offset += padding;
-- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
-- offset += sizeof(uint32_t); /* id and timestamp */
-- } else {
-- /* Minimum space taken by 5-bit id */
-- offset += sizeof(uint8_t);
-- /* Align extended struct on largest member */
-- offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
-- offset += sizeof(uint32_t); /* id */
-- offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
-- offset += sizeof(uint64_t); /* timestamp */
-- }
-- break;
-- case 2: /* large */
-- padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
-- offset += padding;
-- offset += sizeof(uint16_t);
-- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
-- offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
-- offset += sizeof(uint32_t); /* timestamp */
-- } else {
-- /* Align extended struct on largest member */
-- offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
-- offset += sizeof(uint32_t); /* id */
-- offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
-- offset += sizeof(uint64_t); /* timestamp */
-- }
-- break;
-- default:
-- padding = 0;
-- WARN_ON_ONCE(1);
-- }
-- offset += ctx_get_size(offset, event->ctx);
-- offset += ctx_get_size(offset, ltt_chan->ctx);
--
-- *pre_header_padding = padding;
-- return offset - orig_offset;
--}
--
--#include "wrapper/ringbuffer/api.h"
--
--static
--void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
-- struct lib_ring_buffer_ctx *ctx,
-- uint32_t event_id);
--
--/*
-- * ltt_write_event_header
-- *
-- * Writes the event header to the offset (already aligned on 32-bits).
-- *
-- * @config: ring buffer instance configuration
-- * @ctx: reservation context
-- * @event_id: event ID
-- */
--static __inline__
--void ltt_write_event_header(const struct lib_ring_buffer_config *config,
-- struct lib_ring_buffer_ctx *ctx,
-- uint32_t event_id)
--{
-- struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
-- struct ltt_event *event = ctx->priv;
--
-- if (unlikely(ctx->rflags))
-- goto slow_path;
--
-- switch (ltt_chan->header_type) {
-- case 1: /* compact */
-- {
-- uint32_t id_time = 0;
--
-- bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
-- bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
-- lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
-- break;
-- }
-- case 2: /* large */
-- {
-- uint32_t timestamp = (uint32_t) ctx->tsc;
-- uint16_t id = event_id;
--
-- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
-- lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-- break;
-- }
-- default:
-- WARN_ON_ONCE(1);
-- }
--
-- ctx_record(ctx, ltt_chan, ltt_chan->ctx);
-- ctx_record(ctx, ltt_chan, event->ctx);
-- lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
--
-- return;
--
--slow_path:
-- ltt_write_event_header_slow(config, ctx, event_id);
--}
--
--static
--void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
-- struct lib_ring_buffer_ctx *ctx,
-- uint32_t event_id)
--{
-- struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
-- struct ltt_event *event = ctx->priv;
--
-- switch (ltt_chan->header_type) {
-- case 1: /* compact */
-- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
-- uint32_t id_time = 0;
--
-- bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
-- bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
-- lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
-- } else {
-- uint8_t id = 0;
-- uint64_t timestamp = ctx->tsc;
--
-- bt_bitfield_write(&id, uint8_t, 0, 5, 31);
-- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-- /* Align extended struct on largest member */
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
-- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
-- lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-- }
-- break;
-- case 2: /* large */
-- {
-- if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
-- uint32_t timestamp = (uint32_t) ctx->tsc;
-- uint16_t id = event_id;
--
-- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
-- lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-- } else {
-- uint16_t id = 65535;
-- uint64_t timestamp = ctx->tsc;
--
-- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-- /* Align extended struct on largest member */
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
-- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
-- lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-- }
-- break;
-- }
-- default:
-- WARN_ON_ONCE(1);
-- }
-- ctx_record(ctx, ltt_chan, ltt_chan->ctx);
-- ctx_record(ctx, ltt_chan, event->ctx);
-- lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
--}
--
--static const struct lib_ring_buffer_config client_config;
--
--static u64 client_ring_buffer_clock_read(struct channel *chan)
--{
-- return lib_ring_buffer_clock_read(chan);
--}
--
--static
--size_t client_record_header_size(const struct lib_ring_buffer_config *config,
-- struct channel *chan, size_t offset,
-- size_t *pre_header_padding,
-- struct lib_ring_buffer_ctx *ctx)
--{
-- return record_header_size(config, chan, offset,
-- pre_header_padding, ctx);
--}
--
--/**
-- * client_packet_header_size - called on buffer-switch to a new sub-buffer
-- *
-- * Return header size without padding after the structure. Don't use packed
-- * structure because gcc generates inefficient code on some architectures
-- * (powerpc, mips..)
-- */
--static size_t client_packet_header_size(void)
--{
-- return offsetof(struct packet_header, ctx.header_end);
--}
--
--static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
-- unsigned int subbuf_idx)
--{
-- struct channel *chan = buf->backend.chan;
-- struct packet_header *header =
-- (struct packet_header *)
-- lib_ring_buffer_offset_address(&buf->backend,
-- subbuf_idx * chan->backend.subbuf_size);
-- struct ltt_channel *ltt_chan = channel_get_private(chan);
-- struct ltt_session *session = ltt_chan->session;
--
-- header->magic = CTF_MAGIC_NUMBER;
-- memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
-- header->stream_id = ltt_chan->id;
-- header->ctx.timestamp_begin = tsc;
-- header->ctx.timestamp_end = 0;
-- header->ctx.events_discarded = 0;
-- header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
-- header->ctx.packet_size = 0xFFFFFFFF;
-- header->ctx.cpu_id = buf->backend.cpu;
--}
--
--/*
-- * offset is assumed to never be 0 here: never deliver a completely empty
-- * subbuffer. data_size is between 1 and subbuf_size.
-- */
--static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
-- unsigned int subbuf_idx, unsigned long data_size)
--{
-- struct channel *chan = buf->backend.chan;
-- struct packet_header *header =
-- (struct packet_header *)
-- lib_ring_buffer_offset_address(&buf->backend,
-- subbuf_idx * chan->backend.subbuf_size);
-- unsigned long records_lost = 0;
--
-- header->ctx.timestamp_end = tsc;
-- header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
-- header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
-- records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
-- records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
-- records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
-- header->ctx.events_discarded = records_lost;
--}
--
--static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
-- int cpu, const char *name)
--{
-- return 0;
--}
--
--static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
--{
--}
--
--static const struct lib_ring_buffer_config client_config = {
-- .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
-- .cb.record_header_size = client_record_header_size,
-- .cb.subbuffer_header_size = client_packet_header_size,
-- .cb.buffer_begin = client_buffer_begin,
-- .cb.buffer_end = client_buffer_end,
-- .cb.buffer_create = client_buffer_create,
-- .cb.buffer_finalize = client_buffer_finalize,
--
-- .tsc_bits = 32,
-- .alloc = RING_BUFFER_ALLOC_PER_CPU,
-- .sync = RING_BUFFER_SYNC_PER_CPU,
-- .mode = RING_BUFFER_MODE_TEMPLATE,
-- .backend = RING_BUFFER_PAGE,
-- .output = RING_BUFFER_OUTPUT_TEMPLATE,
-- .oops = RING_BUFFER_OOPS_CONSISTENCY,
-- .ipi = RING_BUFFER_IPI_BARRIER,
-- .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
--};
--
--static
--struct channel *_channel_create(const char *name,
-- struct ltt_channel *ltt_chan, void *buf_addr,
-- size_t subbuf_size, size_t num_subbuf,
-- unsigned int switch_timer_interval,
-- unsigned int read_timer_interval)
--{
-- return channel_create(&client_config, name, ltt_chan, buf_addr,
-- subbuf_size, num_subbuf, switch_timer_interval,
-- read_timer_interval);
--}
--
--static
--void ltt_channel_destroy(struct channel *chan)
--{
-- channel_destroy(chan);
--}
--
--static
--struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
--{
-- struct lib_ring_buffer *buf;
-- int cpu;
--
-- for_each_channel_cpu(cpu, chan) {
-- buf = channel_get_ring_buffer(&client_config, chan, cpu);
-- if (!lib_ring_buffer_open_read(buf))
-- return buf;
-- }
-- return NULL;
--}
--
--static
--int ltt_buffer_has_read_closed_stream(struct channel *chan)
--{
-- struct lib_ring_buffer *buf;
-- int cpu;
--
-- for_each_channel_cpu(cpu, chan) {
-- buf = channel_get_ring_buffer(&client_config, chan, cpu);
-- if (!atomic_long_read(&buf->active_readers))
-- return 1;
-- }
-- return 0;
--}
--
--static
--void ltt_buffer_read_close(struct lib_ring_buffer *buf)
--{
-- lib_ring_buffer_release_read(buf);
--}
--
--static
--int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
-- uint32_t event_id)
--{
-- struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
-- int ret, cpu;
--
-- cpu = lib_ring_buffer_get_cpu(&client_config);
-- if (cpu < 0)
-- return -EPERM;
-- ctx->cpu = cpu;
--
-- switch (ltt_chan->header_type) {
-- case 1: /* compact */
-- if (event_id > 30)
-- ctx->rflags |= LTT_RFLAG_EXTENDED;
-- break;
-- case 2: /* large */
-- if (event_id > 65534)
-- ctx->rflags |= LTT_RFLAG_EXTENDED;
-- break;
-- default:
-- WARN_ON_ONCE(1);
-- }
--
-- ret = lib_ring_buffer_reserve(&client_config, ctx);
-- if (ret)
-- goto put;
-- ltt_write_event_header(&client_config, ctx, event_id);
-- return 0;
--put:
-- lib_ring_buffer_put_cpu(&client_config);
-- return ret;
--}
--
--static
--void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
--{
-- lib_ring_buffer_commit(&client_config, ctx);
-- lib_ring_buffer_put_cpu(&client_config);
--}
--
--static
--void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
-- size_t len)
--{
-- lib_ring_buffer_write(&client_config, ctx, src, len);
--}
--
--static
--void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
-- const void __user *src, size_t len)
--{
-- lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
--}
--
--static
--void ltt_event_memset(struct lib_ring_buffer_ctx *ctx,
-- int c, size_t len)
--{
-- lib_ring_buffer_memset(&client_config, ctx, c, len);
--}
--
--static
--wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
--{
-- struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
-- chan, cpu);
-- return &buf->write_wait;
--}
--
--static
--wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
--{
-- return &chan->hp_wait;
--}
--
--static
--int ltt_is_finalized(struct channel *chan)
--{
-- return lib_ring_buffer_channel_is_finalized(chan);
--}
--
--static
--int ltt_is_disabled(struct channel *chan)
--{
-- return lib_ring_buffer_channel_is_disabled(chan);
--}
--
--static struct ltt_transport ltt_relay_transport = {
-- .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
-- .owner = THIS_MODULE,
-- .ops = {
-- .channel_create = _channel_create,
-- .channel_destroy = ltt_channel_destroy,
-- .buffer_read_open = ltt_buffer_read_open,
-- .buffer_has_read_closed_stream =
-- ltt_buffer_has_read_closed_stream,
-- .buffer_read_close = ltt_buffer_read_close,
-- .event_reserve = ltt_event_reserve,
-- .event_commit = ltt_event_commit,
-- .event_write = ltt_event_write,
-- .event_write_from_user = ltt_event_write_from_user,
-- .event_memset = ltt_event_memset,
-- .packet_avail_size = NULL, /* Would be racy anyway */
-- .get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
-- .get_hp_wait_queue = ltt_get_hp_wait_queue,
-- .is_finalized = ltt_is_finalized,
-- .is_disabled = ltt_is_disabled,
-- },
--};
--
--static int __init ltt_ring_buffer_client_init(void)
--{
-- /*
-- * This vmalloc sync all also takes care of the lib ring buffer
-- * vmalloc'd module pages when it is built as a module into LTTng.
-- */
-- wrapper_vmalloc_sync_all();
-- ltt_transport_register(&ltt_relay_transport);
-- return 0;
--}
--
--module_init(ltt_ring_buffer_client_init);
--
--static void __exit ltt_ring_buffer_client_exit(void)
--{
-- ltt_transport_unregister(&ltt_relay_transport);
--}
--
--module_exit(ltt_ring_buffer_client_exit);
--
--MODULE_LICENSE("GPL and additional rights");
--MODULE_AUTHOR("Mathieu Desnoyers");
--MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
-- " client");
---- a/drivers/staging/lttng/ltt-ring-buffer-metadata-client.c
-+++ /dev/null
-@@ -1,21 +0,0 @@
--/*
-- * ltt-ring-buffer-metadata-client.c
-- *
-- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng lib ring buffer metadata client.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include "ltt-tracer.h"
--
--#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
--#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata"
--#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
--#include "ltt-ring-buffer-metadata-client.h"
--
--MODULE_LICENSE("GPL and additional rights");
--MODULE_AUTHOR("Mathieu Desnoyers");
--MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");
---- a/drivers/staging/lttng/ltt-ring-buffer-metadata-client.h
-+++ /dev/null
-@@ -1,330 +0,0 @@
--/*
-- * ltt-ring-buffer-client.h
-- *
-- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng lib ring buffer client template.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include <linux/types.h>
--#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
--#include "ltt-events.h"
--#include "ltt-tracer.h"
--
--struct metadata_packet_header {
-- uint32_t magic; /* 0x75D11D57 */
-- uint8_t uuid[16]; /* Unique Universal Identifier */
-- uint32_t checksum; /* 0 if unused */
-- uint32_t content_size; /* in bits */
-- uint32_t packet_size; /* in bits */
-- uint8_t compression_scheme; /* 0 if unused */
-- uint8_t encryption_scheme; /* 0 if unused */
-- uint8_t checksum_scheme; /* 0 if unused */
-- uint8_t major; /* CTF spec major version number */
-- uint8_t minor; /* CTF spec minor version number */
-- uint8_t header_end[0];
--};
--
--struct metadata_record_header {
-- uint8_t header_end[0]; /* End of header */
--};
--
--static const struct lib_ring_buffer_config client_config;
--
--static inline
--u64 lib_ring_buffer_clock_read(struct channel *chan)
--{
-- return 0;
--}
--
--static inline
--unsigned char record_header_size(const struct lib_ring_buffer_config *config,
-- struct channel *chan, size_t offset,
-- size_t *pre_header_padding,
-- struct lib_ring_buffer_ctx *ctx)
--{
-- return 0;
--}
--
--#include "wrapper/ringbuffer/api.h"
--
--static u64 client_ring_buffer_clock_read(struct channel *chan)
--{
-- return 0;
--}
--
--static
--size_t client_record_header_size(const struct lib_ring_buffer_config *config,
-- struct channel *chan, size_t offset,
-- size_t *pre_header_padding,
-- struct lib_ring_buffer_ctx *ctx)
--{
-- return 0;
--}
--
--/**
-- * client_packet_header_size - called on buffer-switch to a new sub-buffer
-- *
-- * Return header size without padding after the structure. Don't use packed
-- * structure because gcc generates inefficient code on some architectures
-- * (powerpc, mips..)
-- */
--static size_t client_packet_header_size(void)
--{
-- return offsetof(struct metadata_packet_header, header_end);
--}
--
--static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
-- unsigned int subbuf_idx)
--{
-- struct channel *chan = buf->backend.chan;
-- struct metadata_packet_header *header =
-- (struct metadata_packet_header *)
-- lib_ring_buffer_offset_address(&buf->backend,
-- subbuf_idx * chan->backend.subbuf_size);
-- struct ltt_channel *ltt_chan = channel_get_private(chan);
-- struct ltt_session *session = ltt_chan->session;
--
-- header->magic = TSDL_MAGIC_NUMBER;
-- memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
-- header->checksum = 0; /* 0 if unused */
-- header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
-- header->packet_size = 0xFFFFFFFF; /* in bits, for debugging */
-- header->compression_scheme = 0; /* 0 if unused */
-- header->encryption_scheme = 0; /* 0 if unused */
-- header->checksum_scheme = 0; /* 0 if unused */
-- header->major = CTF_SPEC_MAJOR;
-- header->minor = CTF_SPEC_MINOR;
--}
--
--/*
-- * offset is assumed to never be 0 here: never deliver a completely empty
-- * subbuffer. data_size is between 1 and subbuf_size.
-- */
--static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
-- unsigned int subbuf_idx, unsigned long data_size)
--{
-- struct channel *chan = buf->backend.chan;
-- struct metadata_packet_header *header =
-- (struct metadata_packet_header *)
-- lib_ring_buffer_offset_address(&buf->backend,
-- subbuf_idx * chan->backend.subbuf_size);
-- unsigned long records_lost = 0;
--
-- header->content_size = data_size * CHAR_BIT; /* in bits */
-- header->packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
-- /*
-- * We do not care about the records lost count, because the metadata
-- * channel waits and retries.
-- */
-- (void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
-- records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
-- records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
-- WARN_ON_ONCE(records_lost != 0);
--}
--
--static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
-- int cpu, const char *name)
--{
-- return 0;
--}
--
--static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
--{
--}
--
--static const struct lib_ring_buffer_config client_config = {
-- .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
-- .cb.record_header_size = client_record_header_size,
-- .cb.subbuffer_header_size = client_packet_header_size,
-- .cb.buffer_begin = client_buffer_begin,
-- .cb.buffer_end = client_buffer_end,
-- .cb.buffer_create = client_buffer_create,
-- .cb.buffer_finalize = client_buffer_finalize,
--
-- .tsc_bits = 0,
-- .alloc = RING_BUFFER_ALLOC_GLOBAL,
-- .sync = RING_BUFFER_SYNC_GLOBAL,
-- .mode = RING_BUFFER_MODE_TEMPLATE,
-- .backend = RING_BUFFER_PAGE,
-- .output = RING_BUFFER_OUTPUT_TEMPLATE,
-- .oops = RING_BUFFER_OOPS_CONSISTENCY,
-- .ipi = RING_BUFFER_IPI_BARRIER,
-- .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
--};
--
--static
--struct channel *_channel_create(const char *name,
-- struct ltt_channel *ltt_chan, void *buf_addr,
-- size_t subbuf_size, size_t num_subbuf,
-- unsigned int switch_timer_interval,
-- unsigned int read_timer_interval)
--{
-- return channel_create(&client_config, name, ltt_chan, buf_addr,
-- subbuf_size, num_subbuf, switch_timer_interval,
-- read_timer_interval);
--}
--
--static
--void ltt_channel_destroy(struct channel *chan)
--{
-- channel_destroy(chan);
--}
--
--static
--struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
--{
-- struct lib_ring_buffer *buf;
--
-- buf = channel_get_ring_buffer(&client_config, chan, 0);
-- if (!lib_ring_buffer_open_read(buf))
-- return buf;
-- return NULL;
--}
--
--static
--int ltt_buffer_has_read_closed_stream(struct channel *chan)
--{
-- struct lib_ring_buffer *buf;
-- int cpu;
--
-- for_each_channel_cpu(cpu, chan) {
-- buf = channel_get_ring_buffer(&client_config, chan, cpu);
-- if (!atomic_long_read(&buf->active_readers))
-- return 1;
-- }
-- return 0;
--}
--
--static
--void ltt_buffer_read_close(struct lib_ring_buffer *buf)
--{
-- lib_ring_buffer_release_read(buf);
--}
--
--static
--int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
--{
-- return lib_ring_buffer_reserve(&client_config, ctx);
--}
--
--static
--void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
--{
-- lib_ring_buffer_commit(&client_config, ctx);
--}
--
--static
--void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
-- size_t len)
--{
-- lib_ring_buffer_write(&client_config, ctx, src, len);
--}
--
--static
--void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
-- const void __user *src, size_t len)
--{
-- lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
--}
--
--static
--void ltt_event_memset(struct lib_ring_buffer_ctx *ctx,
-- int c, size_t len)
--{
-- lib_ring_buffer_memset(&client_config, ctx, c, len);
--}
--
--static
--size_t ltt_packet_avail_size(struct channel *chan)
--
--{
-- unsigned long o_begin;
-- struct lib_ring_buffer *buf;
--
-- buf = chan->backend.buf; /* Only for global buffer ! */
-- o_begin = v_read(&client_config, &buf->offset);
-- if (subbuf_offset(o_begin, chan) != 0) {
-- return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
-- } else {
-- return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
-- - sizeof(struct metadata_packet_header);
-- }
--}
--
--static
--wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
--{
-- struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
-- chan, cpu);
-- return &buf->write_wait;
--}
--
--static
--wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
--{
-- return &chan->hp_wait;
--}
--
--static
--int ltt_is_finalized(struct channel *chan)
--{
-- return lib_ring_buffer_channel_is_finalized(chan);
--}
--
--static
--int ltt_is_disabled(struct channel *chan)
--{
-- return lib_ring_buffer_channel_is_disabled(chan);
--}
--
--static struct ltt_transport ltt_relay_transport = {
-- .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
-- .owner = THIS_MODULE,
-- .ops = {
-- .channel_create = _channel_create,
-- .channel_destroy = ltt_channel_destroy,
-- .buffer_read_open = ltt_buffer_read_open,
-- .buffer_has_read_closed_stream =
-- ltt_buffer_has_read_closed_stream,
-- .buffer_read_close = ltt_buffer_read_close,
-- .event_reserve = ltt_event_reserve,
-- .event_commit = ltt_event_commit,
-- .event_write_from_user = ltt_event_write_from_user,
-- .event_memset = ltt_event_memset,
-- .event_write = ltt_event_write,
-- .packet_avail_size = ltt_packet_avail_size,
-- .get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
-- .get_hp_wait_queue = ltt_get_hp_wait_queue,
-- .is_finalized = ltt_is_finalized,
-- .is_disabled = ltt_is_disabled,
-- },
--};
--
--static int __init ltt_ring_buffer_client_init(void)
--{
-- /*
-- * This vmalloc sync all also takes care of the lib ring buffer
-- * vmalloc'd module pages when it is built as a module into LTTng.
-- */
-- wrapper_vmalloc_sync_all();
-- ltt_transport_register(&ltt_relay_transport);
-- return 0;
--}
--
--module_init(ltt_ring_buffer_client_init);
--
--static void __exit ltt_ring_buffer_client_exit(void)
--{
-- ltt_transport_unregister(&ltt_relay_transport);
--}
--
--module_exit(ltt_ring_buffer_client_exit);
--
--MODULE_LICENSE("GPL and additional rights");
--MODULE_AUTHOR("Mathieu Desnoyers");
--MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
-- " client");
---- a/drivers/staging/lttng/ltt-ring-buffer-metadata-mmap-client.c
-+++ /dev/null
-@@ -1,21 +0,0 @@
--/*
-- * ltt-ring-buffer-metadata-client.c
-- *
-- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * LTTng lib ring buffer metadata client.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/module.h>
--#include "ltt-tracer.h"
--
--#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
--#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata-mmap"
--#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
--#include "ltt-ring-buffer-metadata-client.h"
--
--MODULE_LICENSE("GPL and additional rights");
--MODULE_AUTHOR("Mathieu Desnoyers");
--MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");
---- a/drivers/staging/lttng/ltt-tracer-core.h
-+++ /dev/null
-@@ -1,28 +0,0 @@
--#ifndef LTT_TRACER_CORE_H
--#define LTT_TRACER_CORE_H
--
--/*
-- * ltt-tracer-core.h
-- *
-- * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * This contains the core definitions for the Linux Trace Toolkit.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <linux/list.h>
--#include <linux/percpu.h>
--
--#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
--/* Align data on its natural alignment */
--#define RING_BUFFER_ALIGN
--#endif
--
--#include "wrapper/ringbuffer/config.h"
--
--struct ltt_session;
--struct ltt_channel;
--struct ltt_event;
--
--#endif /* LTT_TRACER_CORE_H */
---- a/drivers/staging/lttng/ltt-tracer.h
-+++ /dev/null
-@@ -1,67 +0,0 @@
--#ifndef _LTT_TRACER_H
--#define _LTT_TRACER_H
--
--/*
-- * ltt-tracer.h
-- *
-- * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
-- * This contains the definitions for the Linux Trace Toolkit tracer.
-- *
-- * Dual LGPL v2.1/GPL v2 license.
-- */
--
--#include <stdarg.h>
--#include <linux/types.h>
--#include <linux/limits.h>
--#include <linux/list.h>
--#include <linux/cache.h>
--#include <linux/timex.h>
--#include <linux/wait.h>
--#include <asm/atomic.h>
--#include <asm/local.h>
--
--#include "wrapper/trace-clock.h"
--#include "ltt-tracer-core.h"
--#include "ltt-events.h"
--
--#define LTTNG_VERSION 0
--#define LTTNG_PATCHLEVEL 9
--#define LTTNG_SUBLEVEL 1
--
--#ifndef CHAR_BIT
--#define CHAR_BIT 8
--#endif
--
--/* Number of bytes to log with a read/write event */
--#define LTT_LOG_RW_SIZE 32L
--#define LTT_MAX_SMALL_SIZE 0xFFFFU
--
--#ifdef RING_BUFFER_ALIGN
--#define ltt_alignof(type) __alignof__(type)
--#else
--#define ltt_alignof(type) 1
--#endif
--
--/* Tracer properties */
--#define CTF_MAGIC_NUMBER 0xC1FC1FC1
--#define TSDL_MAGIC_NUMBER 0x75D11D57
--
--/* CTF specification version followed */
--#define CTF_SPEC_MAJOR 1
--#define CTF_SPEC_MINOR 8
--
--/* Tracer major/minor versions */
--#define CTF_VERSION_MAJOR 0
--#define CTF_VERSION_MINOR 1
--
--/*
-- * Number of milliseconds to retry before failing metadata writes on buffer full
-- * condition. (10 seconds)
-- */
--#define LTTNG_METADATA_TIMEOUT_MSEC 10000
--
--#define LTT_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
--#define LTT_RFLAG_END (LTT_RFLAG_EXTENDED << 1)
--
--#endif /* _LTT_TRACER_H */
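
ltt_alignof() above is the knob that decides whether event fields get their natural alignment (RING_BUFFER_ALIGN, set when the architecture has no efficient unaligned access) or are packed with alignment 1. A small userspace sketch of the resulting padding; align_offset() is our helper mirroring what we understand lib_ring_buffer_align() to compute, not the library function itself:

/*
 * Padding inserted before a field, depending on RING_BUFFER_ALIGN.
 * Build with -DRING_BUFFER_ALIGN to see natural alignment kick in.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#ifdef RING_BUFFER_ALIGN
#define ltt_alignof(type)	__alignof__(type)
#else
#define ltt_alignof(type)	1
#endif

static size_t align_offset(size_t offset, size_t align)
{
	return (align - (offset & (align - 1))) & (align - 1);
}

int main(void)
{
	size_t offset = 5;	/* e.g. after a 5-byte string payload */

	printf("padding before uint64_t at offset %zu: %zu byte(s)\n",
	       offset, align_offset(offset, ltt_alignof(uint64_t)));
	return 0;
}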
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-abi.c
-@@ -0,0 +1,781 @@
-+/*
-+ * lttng-abi.c
-+ *
-+ * LTTng ABI
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
-+ *
-+ * Mimic system calls for:
-+ * - session creation, returns a file descriptor or failure.
-+ * - channel creation, returns a file descriptor or failure.
-+ * - Operates on a session file descriptor
-+ * - Takes all channel options as parameters.
-+ * - stream get, returns a file descriptor or failure.
-+ * - Operates on a channel file descriptor.
-+ * - stream notifier get, returns a file descriptor or failure.
-+ * - Operates on a channel file descriptor.
-+ * - event creation, returns a file descriptor or failure.
-+ * - Operates on a channel file descriptor
-+ * - Takes an event name as parameter
-+ * - Takes an instrumentation source as parameter
-+ * - e.g. tracepoints, dynamic_probes...
-+ * - Takes instrumentation source specific arguments.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/proc_fs.h>
-+#include <linux/anon_inodes.h>
-+#include <linux/file.h>
-+#include <linux/uaccess.h>
-+#include <linux/slab.h>
-+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "wrapper/ringbuffer/vfs.h"
-+#include "wrapper/poll.h"
-+#include "lttng-abi.h"
-+#include "lttng-events.h"
-+#include "lttng-tracer.h"
-+
-+/*
-+ * This is LTTng's own personal way to create a system call as an external
-+ * module. We use ioctl() on /proc/lttng.
-+ */
-+
-+static struct proc_dir_entry *lttng_proc_dentry;
-+static const struct file_operations lttng_fops;
-+static const struct file_operations lttng_session_fops;
-+static const struct file_operations lttng_channel_fops;
-+static const struct file_operations lttng_metadata_fops;
-+static const struct file_operations lttng_event_fops;
-+
-+/*
-+ * Teardown management: opened file descriptors keep a refcount on the module,
-+ * so it can only exit when all file descriptors are closed.
-+ */
-+
-+enum channel_type {
-+ PER_CPU_CHANNEL,
-+ METADATA_CHANNEL,
-+};
-+
-+static
-+int lttng_abi_create_session(void)
-+{
-+ struct lttng_session *session;
-+ struct file *session_file;
-+ int session_fd, ret;
-+
-+ session = lttng_session_create();
-+ if (!session)
-+ return -ENOMEM;
-+ session_fd = get_unused_fd();
-+ if (session_fd < 0) {
-+ ret = session_fd;
-+ goto fd_error;
-+ }
-+ session_file = anon_inode_getfile("[lttng_session]",
-+ &lttng_session_fops,
-+ session, O_RDWR);
-+ if (IS_ERR(session_file)) {
-+ ret = PTR_ERR(session_file);
-+ goto file_error;
-+ }
-+ session->file = session_file;
-+ fd_install(session_fd, session_file);
-+ return session_fd;
-+
-+file_error:
-+ put_unused_fd(session_fd);
-+fd_error:
-+ lttng_session_destroy(session);
-+ return ret;
-+}
-+
-+static
-+int lttng_abi_tracepoint_list(void)
-+{
-+ struct file *tracepoint_list_file;
-+ int file_fd, ret;
-+
-+ file_fd = get_unused_fd();
-+ if (file_fd < 0) {
-+ ret = file_fd;
-+ goto fd_error;
-+ }
-+
-+ tracepoint_list_file = anon_inode_getfile("[lttng_session]",
-+ &lttng_tracepoint_list_fops,
-+ NULL, O_RDWR);
-+ if (IS_ERR(tracepoint_list_file)) {
-+ ret = PTR_ERR(tracepoint_list_file);
-+ goto file_error;
-+ }
-+ ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
-+ if (ret < 0)
-+ goto open_error;
-+ fd_install(file_fd, tracepoint_list_file);
-+ if (file_fd < 0) {
-+ ret = file_fd;
-+ goto fd_error;
-+ }
-+ return file_fd;
-+
-+open_error:
-+ fput(tracepoint_list_file);
-+file_error:
-+ put_unused_fd(file_fd);
-+fd_error:
-+ return ret;
-+}
-+
-+static
-+long lttng_abi_tracer_version(struct file *file,
-+ struct lttng_kernel_tracer_version __user *uversion_param)
-+{
-+ struct lttng_kernel_tracer_version v;
-+
-+ v.major = LTTNG_MODULES_MAJOR_VERSION;
-+ v.minor = LTTNG_MODULES_MINOR_VERSION;
-+ v.patchlevel = LTTNG_MODULES_PATCHLEVEL_VERSION;
-+
-+ if (copy_to_user(uversion_param, &v, sizeof(v)))
-+ return -EFAULT;
-+ return 0;
-+}
-+
-+static
-+long lttng_abi_add_context(struct file *file,
-+ struct lttng_kernel_context __user *ucontext_param,
-+ struct lttng_ctx **ctx, struct lttng_session *session)
-+{
-+ struct lttng_kernel_context context_param;
-+
-+ if (session->been_active)
-+ return -EPERM;
-+
-+ if (copy_from_user(&context_param, ucontext_param, sizeof(context_param)))
-+ return -EFAULT;
-+
-+ switch (context_param.ctx) {
-+ case LTTNG_KERNEL_CONTEXT_PID:
-+ return lttng_add_pid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_PRIO:
-+ return lttng_add_prio_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_NICE:
-+ return lttng_add_nice_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_VPID:
-+ return lttng_add_vpid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_TID:
-+ return lttng_add_tid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_VTID:
-+ return lttng_add_vtid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_PPID:
-+ return lttng_add_ppid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_VPPID:
-+ return lttng_add_vppid_to_ctx(ctx);
-+ case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
-+ context_param.u.perf_counter.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-+ return lttng_add_perf_counter_to_ctx(context_param.u.perf_counter.type,
-+ context_param.u.perf_counter.config,
-+ context_param.u.perf_counter.name,
-+ ctx);
-+ case LTTNG_KERNEL_CONTEXT_PROCNAME:
-+ return lttng_add_procname_to_ctx(ctx);
-+ default:
-+ return -EINVAL;
-+ }
-+}
-+
-+/**
-+ * lttng_ioctl - lttng syscall through ioctl
-+ *
-+ * @file: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements lttng commands:
-+ * LTTNG_KERNEL_SESSION
-+ * Returns a LTTng trace session file descriptor
-+ * LTTNG_KERNEL_TRACER_VERSION
-+ * Returns the LTTng kernel tracer version
-+ * LTTNG_KERNEL_TRACEPOINT_LIST
-+ * Returns a file descriptor listing available tracepoints
-+ * LTTNG_KERNEL_WAIT_QUIESCENT
-+ * Returns after all previously running probes have completed
-+ *
-+ * The returned session will be deleted when its file descriptor is closed.
-+ */
-+static
-+long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ switch (cmd) {
-+ case LTTNG_KERNEL_SESSION:
-+ return lttng_abi_create_session();
-+ case LTTNG_KERNEL_TRACER_VERSION:
-+ return lttng_abi_tracer_version(file,
-+ (struct lttng_kernel_tracer_version __user *) arg);
-+ case LTTNG_KERNEL_TRACEPOINT_LIST:
-+ return lttng_abi_tracepoint_list();
-+ case LTTNG_KERNEL_WAIT_QUIESCENT:
-+ synchronize_trace();
-+ return 0;
-+ case LTTNG_KERNEL_CALIBRATE:
-+ {
-+ struct lttng_kernel_calibrate __user *ucalibrate =
-+ (struct lttng_kernel_calibrate __user *) arg;
-+ struct lttng_kernel_calibrate calibrate;
-+ int ret;
-+
-+ if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
-+ return -EFAULT;
-+ ret = lttng_calibrate(&calibrate);
-+ if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
-+ return -EFAULT;
-+ return ret;
-+ }
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+
-+static const struct file_operations lttng_fops = {
-+ .owner = THIS_MODULE,
-+ .unlocked_ioctl = lttng_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lttng_ioctl,
-+#endif
-+};
-+
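
The /proc/lttng file exposes the commands handled by lttng_ioctl() below, and every successful object-creating ioctl hands back a new file descriptor that the next level of commands operates on. A hedged userspace sketch, assuming the LTTNG_KERNEL_* request numbers and struct lttng_kernel_channel come from lttng-abi.h (not shown in this hunk); the channel sizing values are placeholders:

/*
 * Illustrative userspace driver: session fd from /proc/lttng,
 * then a per-CPU channel fd from the session fd.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "lttng-abi.h"

int main(void)
{
	struct lttng_kernel_channel chan_param;
	int lttng_fd, session_fd, channel_fd;

	lttng_fd = open("/proc/lttng", O_RDWR);
	if (lttng_fd < 0)
		return 1;

	/* Each successful ioctl returns a new file descriptor. */
	session_fd = ioctl(lttng_fd, LTTNG_KERNEL_SESSION);
	if (session_fd < 0)
		return 1;

	memset(&chan_param, 0, sizeof(chan_param));
	chan_param.output = LTTNG_KERNEL_SPLICE;	/* selects a relay-* transport */
	chan_param.subbuf_size = 4096;			/* placeholder sizing */
	chan_param.num_subbuf = 16;

	channel_fd = ioctl(session_fd, LTTNG_KERNEL_CHANNEL, &chan_param);
	if (channel_fd < 0)
		return 1;

	/* LTTNG_KERNEL_SESSION_START / _STOP would toggle tracing here. */
	close(channel_fd);
	close(session_fd);
	close(lttng_fd);
	return 0;
}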
-+/*
-+ * We tolerate no failure in this function (if one happens, we print a dmesg
-+ * error, but cannot return any error), because the channel information is
-+ * invariant.
-+ */
-+static
-+void lttng_metadata_create_events(struct file *channel_file)
-+{
-+ struct lttng_channel *channel = channel_file->private_data;
-+ static struct lttng_kernel_event metadata_params = {
-+ .instrumentation = LTTNG_KERNEL_TRACEPOINT,
-+ .name = "lttng_metadata",
-+ };
-+ struct lttng_event *event;
-+
-+ /*
-+ * We tolerate no failure path after event creation. It will stay
-+ * invariant for the rest of the session.
-+ */
-+ event = lttng_event_create(channel, &metadata_params, NULL, NULL);
-+ if (!event) {
-+ goto create_error;
-+ }
-+ return;
-+
-+create_error:
-+ WARN_ON(1);
-+ return; /* not allowed to return error */
-+}
-+
-+static
-+int lttng_abi_create_channel(struct file *session_file,
-+ struct lttng_kernel_channel __user *uchan_param,
-+ enum channel_type channel_type)
-+{
-+ struct lttng_session *session = session_file->private_data;
-+ const struct file_operations *fops = NULL;
-+ const char *transport_name;
-+ struct lttng_channel *chan;
-+ struct file *chan_file;
-+ struct lttng_kernel_channel chan_param;
-+ int chan_fd;
-+ int ret = 0;
-+
-+ if (copy_from_user(&chan_param, uchan_param, sizeof(chan_param)))
-+ return -EFAULT;
-+ chan_fd = get_unused_fd();
-+ if (chan_fd < 0) {
-+ ret = chan_fd;
-+ goto fd_error;
-+ }
-+ switch (channel_type) {
-+ case PER_CPU_CHANNEL:
-+ fops = &lttng_channel_fops;
-+ break;
-+ case METADATA_CHANNEL:
-+ fops = &lttng_metadata_fops;
-+ break;
-+ }
-+
-+ chan_file = anon_inode_getfile("[lttng_channel]",
-+ fops,
-+ NULL, O_RDWR);
-+ if (IS_ERR(chan_file)) {
-+ ret = PTR_ERR(chan_file);
-+ goto file_error;
-+ }
-+ switch (channel_type) {
-+ case PER_CPU_CHANNEL:
-+ if (chan_param.output == LTTNG_KERNEL_SPLICE) {
-+ transport_name = chan_param.overwrite ?
-+ "relay-overwrite" : "relay-discard";
-+ } else if (chan_param.output == LTTNG_KERNEL_MMAP) {
-+ transport_name = chan_param.overwrite ?
-+ "relay-overwrite-mmap" : "relay-discard-mmap";
-+ } else {
-+ return -EINVAL;
-+ }
-+ break;
-+ case METADATA_CHANNEL:
-+ if (chan_param.output == LTTNG_KERNEL_SPLICE)
-+ transport_name = "relay-metadata";
-+ else if (chan_param.output == LTTNG_KERNEL_MMAP)
-+ transport_name = "relay-metadata-mmap";
-+ else
-+ return -EINVAL;
-+ break;
-+ default:
-+ transport_name = "<unknown>";
-+ break;
-+ }
-+ /*
-+ * We tolerate no failure path after channel creation. It will stay
-+ * invariant for the rest of the session.
-+ */
-+ chan = lttng_channel_create(session, transport_name, NULL,
-+ chan_param.subbuf_size,
-+ chan_param.num_subbuf,
-+ chan_param.switch_timer_interval,
-+ chan_param.read_timer_interval);
-+ if (!chan) {
-+ ret = -EINVAL;
-+ goto chan_error;
-+ }
-+ chan->file = chan_file;
-+ chan_file->private_data = chan;
-+ fd_install(chan_fd, chan_file);
-+ if (channel_type == METADATA_CHANNEL) {
-+ session->metadata = chan;
-+ lttng_metadata_create_events(chan_file);
-+ }
-+
-+ /* The channel created holds a reference on the session */
-+ atomic_long_inc(&session_file->f_count);
-+
-+ return chan_fd;
-+
-+chan_error:
-+ fput(chan_file);
-+file_error:
-+ put_unused_fd(chan_fd);
-+fd_error:
-+ return ret;
-+}
-+
-+/**
-+ * lttng_session_ioctl - lttng session fd ioctl
-+ *
-+ * @file: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements lttng commands:
-+ * LTTNG_KERNEL_CHANNEL
-+ * Returns a LTTng channel file descriptor
-+ * LTTNG_KERNEL_ENABLE
-+ * Enables tracing for a session (weak enable)
-+ * LTTNG_KERNEL_DISABLE
-+ * Disables tracing for a session (strong disable)
-+ * LTTNG_KERNEL_METADATA
-+ * Returns a LTTng metadata file descriptor
-+ *
-+ * The returned channel will be deleted when its file descriptor is closed.
-+ */
-+static
-+long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ struct lttng_session *session = file->private_data;
-+
-+ switch (cmd) {
-+ case LTTNG_KERNEL_CHANNEL:
-+ return lttng_abi_create_channel(file,
-+ (struct lttng_kernel_channel __user *) arg,
-+ PER_CPU_CHANNEL);
-+ case LTTNG_KERNEL_SESSION_START:
-+ case LTTNG_KERNEL_ENABLE:
-+ return lttng_session_enable(session);
-+ case LTTNG_KERNEL_SESSION_STOP:
-+ case LTTNG_KERNEL_DISABLE:
-+ return lttng_session_disable(session);
-+ case LTTNG_KERNEL_METADATA:
-+ return lttng_abi_create_channel(file,
-+ (struct lttng_kernel_channel __user *) arg,
-+ METADATA_CHANNEL);
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+
-+/*
-+ * Called when the last file reference is dropped.
-+ *
-+ * Big fat note: channels and events are invariant for the whole session after
-+ * their creation. So this session destruction also destroys all channel and
-+ * event structures specific to this session (they are not destroyed when their
-+ * individual file is released).
-+ */
-+static
-+int lttng_session_release(struct inode *inode, struct file *file)
-+{
-+ struct lttng_session *session = file->private_data;
-+
-+ if (session)
-+ lttng_session_destroy(session);
-+ return 0;
-+}
-+
-+static const struct file_operations lttng_session_fops = {
-+ .owner = THIS_MODULE,
-+ .release = lttng_session_release,
-+ .unlocked_ioctl = lttng_session_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lttng_session_ioctl,
-+#endif
-+};
-+
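
Every create/open helper in this file repeats the same anonymous-inode pattern: reserve a file descriptor, wrap the private object in a file, and publish the descriptor only once nothing can fail any more. Condensed into one illustrative helper (the demo_* names are ours, not part of the ABI):

/*
 * get_unused_fd() -> anon_inode_getfile() -> fd_install(), with the
 * unwind order used throughout this file.
 */
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/err.h>

static int demo_create_fd(const struct file_operations *fops, void *priv)
{
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;
	filp = anon_inode_getfile("[lttng_demo]", fops, priv, O_RDWR);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		put_unused_fd(fd);
		return ret;
	}
	/* fd_install() is the point of no return: user space can now see it. */
	fd_install(fd, filp);
	return fd;
}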
-+static
-+int lttng_abi_open_stream(struct file *channel_file)
-+{
-+ struct lttng_channel *channel = channel_file->private_data;
-+ struct lib_ring_buffer *buf;
-+ int stream_fd, ret;
-+ struct file *stream_file;
-+
-+ buf = channel->ops->buffer_read_open(channel->chan);
-+ if (!buf)
-+ return -ENOENT;
-+
-+ stream_fd = get_unused_fd();
-+ if (stream_fd < 0) {
-+ ret = stream_fd;
-+ goto fd_error;
-+ }
-+ stream_file = anon_inode_getfile("[lttng_stream]",
-+ &lib_ring_buffer_file_operations,
-+ buf, O_RDWR);
-+ if (IS_ERR(stream_file)) {
-+ ret = PTR_ERR(stream_file);
-+ goto file_error;
-+ }
-+ /*
-+ * OPEN_FMODE, called within anon_inode_getfile/alloc_file, doesn't honor
-+ * FMODE_LSEEK, FMODE_PREAD nor FMODE_PWRITE. We need to read from this
-+ * file descriptor, so we set FMODE_PREAD here.
-+ */
-+ stream_file->f_mode |= FMODE_PREAD;
-+ fd_install(stream_fd, stream_file);
-+ /*
-+ * The stream holds a reference to the channel within the generic ring
-+ * buffer library, so no need to hold a refcount on the channel and
-+ * session files here.
-+ */
-+ return stream_fd;
-+
-+file_error:
-+ put_unused_fd(stream_fd);
-+fd_error:
-+ channel->ops->buffer_read_close(buf);
-+ return ret;
-+}
-+
-+static
-+int lttng_abi_create_event(struct file *channel_file,
-+ struct lttng_kernel_event __user *uevent_param)
-+{
-+ struct lttng_channel *channel = channel_file->private_data;
-+ struct lttng_event *event;
-+ struct lttng_kernel_event event_param;
-+ int event_fd, ret;
-+ struct file *event_file;
-+
-+ if (copy_from_user(&event_param, uevent_param, sizeof(event_param)))
-+ return -EFAULT;
-+ event_param.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-+ switch (event_param.instrumentation) {
-+ case LTTNG_KERNEL_KRETPROBE:
-+ event_param.u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-+ break;
-+ case LTTNG_KERNEL_KPROBE:
-+ event_param.u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-+ break;
-+ case LTTNG_KERNEL_FUNCTION:
-+ event_param.u.ftrace.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
-+ break;
-+ default:
-+ break;
-+ }
-+ switch (event_param.instrumentation) {
-+ default:
-+ event_fd = get_unused_fd();
-+ if (event_fd < 0) {
-+ ret = event_fd;
-+ goto fd_error;
-+ }
-+ event_file = anon_inode_getfile("[lttng_event]",
-+ &lttng_event_fops,
-+ NULL, O_RDWR);
-+ if (IS_ERR(event_file)) {
-+ ret = PTR_ERR(event_file);
-+ goto file_error;
-+ }
-+ /*
-+ * We tolerate no failure path after event creation. It
-+ * will stay invariant for the rest of the session.
-+ */
-+ event = lttng_event_create(channel, &event_param, NULL, NULL);
-+ if (!event) {
-+ ret = -EINVAL;
-+ goto event_error;
-+ }
-+ event_file->private_data = event;
-+ fd_install(event_fd, event_file);
-+ /* The event holds a reference on the channel */
-+ atomic_long_inc(&channel_file->f_count);
-+ break;
-+ case LTTNG_KERNEL_SYSCALL:
-+ /*
-+ * Only all-syscall tracing supported for now.
-+ */
-+ if (event_param.name[0] != '\0')
-+ return -EINVAL;
-+ ret = lttng_syscalls_register(channel, NULL);
-+ if (ret)
-+ goto fd_error;
-+ event_fd = 0;
-+ break;
-+ }
-+ return event_fd;
-+
-+event_error:
-+ fput(event_file);
-+file_error:
-+ put_unused_fd(event_fd);
-+fd_error:
-+ return ret;
-+}
-+
-+/**
-+ * lttng_channel_ioctl - lttng syscall through ioctl
-+ *
-+ * @file: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements lttng commands:
-+ * LTTNG_KERNEL_STREAM
-+ * Returns an event stream file descriptor or failure.
-+ * (typically, one event stream records events from one CPU)
-+ * LTTNG_KERNEL_EVENT
-+ * Returns an event file descriptor or failure.
-+ * LTTNG_KERNEL_CONTEXT
-+ * Prepend a context field to each event in the channel
-+ * LTTNG_KERNEL_ENABLE
-+ * Enable recording for events in this channel (weak enable)
-+ * LTTNG_KERNEL_DISABLE
-+ * Disable recording for events in this channel (strong disable)
-+ *
-+ * Channel and event file descriptors also hold a reference on the session.
-+ */
-+static
-+long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ struct lttng_channel *channel = file->private_data;
-+
-+ switch (cmd) {
-+ case LTTNG_KERNEL_STREAM:
-+ return lttng_abi_open_stream(file);
-+ case LTTNG_KERNEL_EVENT:
-+ return lttng_abi_create_event(file, (struct lttng_kernel_event __user *) arg);
-+ case LTTNG_KERNEL_CONTEXT:
-+ return lttng_abi_add_context(file,
-+ (struct lttng_kernel_context __user *) arg,
-+ &channel->ctx, channel->session);
-+ case LTTNG_KERNEL_ENABLE:
-+ return lttng_channel_enable(channel);
-+ case LTTNG_KERNEL_DISABLE:
-+ return lttng_channel_disable(channel);
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
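
In practice a consumer follows lttng_channel_ioctl() by requesting one stream fd per buffer through LTTNG_KERNEL_STREAM. A minimal sketch, assuming the loop simply stops on the first error once every per-CPU buffer has been handed out (the exact errno for that case is not visible in this hunk):

#include <sys/ioctl.h>
#include "lttng-abi.h"

/* Collect stream fds from a channel fd; returns how many were opened. */
static int open_channel_streams(int chan_fd, int *stream_fds, int max)
{
	int i, fd;

	for (i = 0; i < max; i++) {
		fd = ioctl(chan_fd, LTTNG_KERNEL_STREAM, 0);
		if (fd < 0)
			break;	/* assumed: no more readable buffers */
		stream_fds[i] = fd;
	}
	return i;
}
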
-+
-+/**
-+ * lttng_metadata_ioctl - lttng syscall through ioctl
-+ *
-+ * @file: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements lttng commands:
-+ * LTTNG_KERNEL_STREAM
-+ * Returns an event stream file descriptor or failure.
-+ *
-+ * Channel and event file descriptors also hold a reference on the session.
-+ */
-+static
-+long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ switch (cmd) {
-+ case LTTNG_KERNEL_STREAM:
-+ return lttng_abi_open_stream(file);
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
-+
-+/**
-+ * lttng_channel_poll - lttng stream addition/removal monitoring
-+ *
-+ * @file: the file
-+ * @wait: poll table
-+ */
-+unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
-+{
-+ struct lttng_channel *channel = file->private_data;
-+ unsigned int mask = 0;
-+
-+ if (file->f_mode & FMODE_READ) {
-+ poll_wait_set_exclusive(wait);
-+ poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
-+ wait);
-+
-+ if (channel->ops->is_disabled(channel->chan))
-+ return POLLERR;
-+ if (channel->ops->is_finalized(channel->chan))
-+ return POLLHUP;
-+ if (channel->ops->buffer_has_read_closed_stream(channel->chan))
-+ return POLLIN | POLLRDNORM;
-+ return 0;
-+ }
-+ return mask;
-+
-+}
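
The mask returned above maps directly onto a consumer-side poll loop; roughly (error handling trimmed, and only the events actually set by lttng_channel_poll() are interpreted):

#include <poll.h>

/* Wait on a channel fd until a stream becomes readable or the channel ends. */
static int wait_for_channel(int chan_fd)
{
	struct pollfd pfd = { .fd = chan_fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) < 0)
		return -1;
	if (pfd.revents & POLLERR)
		return -1;	/* channel disabled */
	if (pfd.revents & POLLHUP)
		return 0;	/* channel finalized: no more data */
	if (pfd.revents & (POLLIN | POLLRDNORM))
		return 1;	/* a read-closed stream is available */
	return 0;
}
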
-+
-+static
-+int lttng_channel_release(struct inode *inode, struct file *file)
-+{
-+ struct lttng_channel *channel = file->private_data;
-+
-+ if (channel)
-+ fput(channel->session->file);
-+ return 0;
-+}
-+
-+static const struct file_operations lttng_channel_fops = {
-+ .owner = THIS_MODULE,
-+ .release = lttng_channel_release,
-+ .poll = lttng_channel_poll,
-+ .unlocked_ioctl = lttng_channel_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lttng_channel_ioctl,
-+#endif
-+};
-+
-+static const struct file_operations lttng_metadata_fops = {
-+ .owner = THIS_MODULE,
-+ .release = lttng_channel_release,
-+ .unlocked_ioctl = lttng_metadata_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lttng_metadata_ioctl,
-+#endif
-+};
-+
-+/**
-+ * lttng_event_ioctl - lttng syscall through ioctl
-+ *
-+ * @file: the file
-+ * @cmd: the command
-+ * @arg: command arg
-+ *
-+ * This ioctl implements lttng commands:
-+ * LTTNG_KERNEL_CONTEXT
-+ * Prepend a context field to each record of this event
-+ * LTTNG_KERNEL_ENABLE
-+ * Enable recording for this event (weak enable)
-+ * LTTNG_KERNEL_DISABLE
-+ * Disable recording for this event (strong disable)
-+ */
-+static
-+long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-+{
-+ struct lttng_event *event = file->private_data;
-+
-+ switch (cmd) {
-+ case LTTNG_KERNEL_CONTEXT:
-+ return lttng_abi_add_context(file,
-+ (struct lttng_kernel_context __user *) arg,
-+ &event->ctx, event->chan->session);
-+ case LTTNG_KERNEL_ENABLE:
-+ return lttng_event_enable(event);
-+ case LTTNG_KERNEL_DISABLE:
-+ return lttng_event_disable(event);
-+ default:
-+ return -ENOIOCTLCMD;
-+ }
-+}
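
Contexts are attached the same way at event scope. A hedged sketch using the lttng_kernel_context layout from lttng-abi.h below; the event fd is assumed to come from a prior LTTNG_KERNEL_EVENT call:

#include <string.h>
#include <sys/ioctl.h>
#include "lttng-abi.h"

/* Prepend the process name context to every record of this event. */
static int add_procname_context(int event_fd)
{
	struct lttng_kernel_context ctx;

	memset(&ctx, 0, sizeof(ctx));
	ctx.ctx = LTTNG_KERNEL_CONTEXT_PROCNAME;
	return ioctl(event_fd, LTTNG_KERNEL_CONTEXT, &ctx);
}
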
-+
-+static
-+int lttng_event_release(struct inode *inode, struct file *file)
-+{
-+ struct lttng_event *event = file->private_data;
-+
-+ if (event)
-+ fput(event->chan->file);
-+ return 0;
-+}
-+
-+/* TODO: filter control ioctl */
-+static const struct file_operations lttng_event_fops = {
-+ .owner = THIS_MODULE,
-+ .release = lttng_event_release,
-+ .unlocked_ioctl = lttng_event_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = lttng_event_ioctl,
-+#endif
-+};
-+
-+int __init lttng_abi_init(void)
-+{
-+ int ret = 0;
-+
-+ wrapper_vmalloc_sync_all();
-+ lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR, NULL,
-+ &lttng_fops, NULL);
-+
-+ if (!lttng_proc_dentry) {
-+ printk(KERN_ERR "Error creating LTTng control file\n");
-+ ret = -ENOMEM;
-+ goto error;
-+ }
-+error:
-+ return ret;
-+}
-+
-+void __exit lttng_abi_exit(void)
-+{
-+ if (lttng_proc_dentry)
-+ remove_proc_entry("lttng", NULL);
-+}
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-abi.h
-@@ -0,0 +1,176 @@
-+#ifndef _LTTNG_ABI_H
-+#define _LTTNG_ABI_H
-+
-+/*
-+ * lttng-abi.h
-+ *
-+ * LTTng ABI header
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/fs.h>
-+
-+#define LTTNG_KERNEL_SYM_NAME_LEN 256
-+
-+enum lttng_kernel_instrumentation {
-+ LTTNG_KERNEL_TRACEPOINT = 0,
-+ LTTNG_KERNEL_KPROBE = 1,
-+ LTTNG_KERNEL_FUNCTION = 2,
-+ LTTNG_KERNEL_KRETPROBE = 3,
-+ LTTNG_KERNEL_NOOP = 4, /* not hooked */
-+ LTTNG_KERNEL_SYSCALL = 5,
-+};
-+
-+/*
-+ * LTTng consumer mode
-+ */
-+enum lttng_kernel_output {
-+ LTTNG_KERNEL_SPLICE = 0,
-+ LTTNG_KERNEL_MMAP = 1,
-+};
-+
-+/*
-+ * LTTng DebugFS ABI structures.
-+ */
-+#define LTTNG_KERNEL_CHANNEL_PADDING LTTNG_KERNEL_SYM_NAME_LEN + 32
-+struct lttng_kernel_channel {
-+ int overwrite; /* 1: overwrite, 0: discard */
-+ uint64_t subbuf_size; /* in bytes */
-+ uint64_t num_subbuf;
-+ unsigned int switch_timer_interval; /* usecs */
-+ unsigned int read_timer_interval; /* usecs */
-+ enum lttng_kernel_output output; /* splice, mmap */
-+ char padding[LTTNG_KERNEL_CHANNEL_PADDING];
-+};
-+
-+struct lttng_kernel_kretprobe {
-+ uint64_t addr;
-+
-+ uint64_t offset;
-+ char symbol_name[LTTNG_KERNEL_SYM_NAME_LEN];
-+};
-+
-+/*
-+ * Either addr is used, or symbol_name and offset.
-+ */
-+struct lttng_kernel_kprobe {
-+ uint64_t addr;
-+
-+ uint64_t offset;
-+ char symbol_name[LTTNG_KERNEL_SYM_NAME_LEN];
-+};
-+
-+struct lttng_kernel_function_tracer {
-+ char symbol_name[LTTNG_KERNEL_SYM_NAME_LEN];
-+};
-+
-+/*
-+ * For syscall tracing, name = '\0' means "enable all".
-+ */
-+#define LTTNG_KERNEL_EVENT_PADDING1 16
-+#define LTTNG_KERNEL_EVENT_PADDING2 LTTNG_KERNEL_SYM_NAME_LEN + 32
-+struct lttng_kernel_event {
-+ char name[LTTNG_KERNEL_SYM_NAME_LEN]; /* event name */
-+ enum lttng_kernel_instrumentation instrumentation;
-+ char padding[LTTNG_KERNEL_EVENT_PADDING1];
-+
-+ /* Per instrumentation type configuration */
-+ union {
-+ struct lttng_kernel_kretprobe kretprobe;
-+ struct lttng_kernel_kprobe kprobe;
-+ struct lttng_kernel_function_tracer ftrace;
-+ char padding[LTTNG_KERNEL_EVENT_PADDING2];
-+ } u;
-+};
-+
-+struct lttng_kernel_tracer_version {
-+ uint32_t major;
-+ uint32_t minor;
-+ uint32_t patchlevel;
-+};
-+
-+enum lttng_kernel_calibrate_type {
-+ LTTNG_KERNEL_CALIBRATE_KRETPROBE,
-+};
-+
-+struct lttng_kernel_calibrate {
-+ enum lttng_kernel_calibrate_type type; /* type (input) */
-+};
-+
-+enum lttng_kernel_context_type {
-+ LTTNG_KERNEL_CONTEXT_PID = 0,
-+ LTTNG_KERNEL_CONTEXT_PERF_COUNTER = 1,
-+ LTTNG_KERNEL_CONTEXT_PROCNAME = 2,
-+ LTTNG_KERNEL_CONTEXT_PRIO = 3,
-+ LTTNG_KERNEL_CONTEXT_NICE = 4,
-+ LTTNG_KERNEL_CONTEXT_VPID = 5,
-+ LTTNG_KERNEL_CONTEXT_TID = 6,
-+ LTTNG_KERNEL_CONTEXT_VTID = 7,
-+ LTTNG_KERNEL_CONTEXT_PPID = 8,
-+ LTTNG_KERNEL_CONTEXT_VPPID = 9,
-+};
-+
-+struct lttng_kernel_perf_counter_ctx {
-+ uint32_t type;
-+ uint64_t config;
-+ char name[LTTNG_KERNEL_SYM_NAME_LEN];
-+};
-+
-+#define LTTNG_KERNEL_CONTEXT_PADDING1 16
-+#define LTTNG_KERNEL_CONTEXT_PADDING2 LTTNG_KERNEL_SYM_NAME_LEN + 32
-+struct lttng_kernel_context {
-+ enum lttng_kernel_context_type ctx;
-+ char padding[LTTNG_KERNEL_CONTEXT_PADDING1];
-+
-+ union {
-+ struct lttng_kernel_perf_counter_ctx perf_counter;
-+ char padding[LTTNG_KERNEL_CONTEXT_PADDING2];
-+ } u;
-+};
-+
-+/* LTTng file descriptor ioctl */
-+#define LTTNG_KERNEL_SESSION _IO(0xF6, 0x40)
-+#define LTTNG_KERNEL_TRACER_VERSION \
-+ _IOR(0xF6, 0x41, struct lttng_kernel_tracer_version)
-+#define LTTNG_KERNEL_TRACEPOINT_LIST _IO(0xF6, 0x42)
-+#define LTTNG_KERNEL_WAIT_QUIESCENT _IO(0xF6, 0x43)
-+#define LTTNG_KERNEL_CALIBRATE \
-+ _IOWR(0xF6, 0x44, struct lttng_kernel_calibrate)
-+
-+/* Session FD ioctl */
-+#define LTTNG_KERNEL_METADATA \
-+ _IOW(0xF6, 0x50, struct lttng_kernel_channel)
-+#define LTTNG_KERNEL_CHANNEL \
-+ _IOW(0xF6, 0x51, struct lttng_kernel_channel)
-+#define LTTNG_KERNEL_SESSION_START _IO(0xF6, 0x52)
-+#define LTTNG_KERNEL_SESSION_STOP _IO(0xF6, 0x53)
-+
-+/* Channel FD ioctl */
-+#define LTTNG_KERNEL_STREAM _IO(0xF6, 0x60)
-+#define LTTNG_KERNEL_EVENT \
-+ _IOW(0xF6, 0x61, struct lttng_kernel_event)
-+
-+/* Event and Channel FD ioctl */
-+#define LTTNG_KERNEL_CONTEXT \
-+ _IOW(0xF6, 0x70, struct lttng_kernel_context)
-+
-+/* Event, Channel and Session ioctl */
-+#define LTTNG_KERNEL_ENABLE _IO(0xF6, 0x80)
-+#define LTTNG_KERNEL_DISABLE _IO(0xF6, 0x81)
-+
-+#endif /* _LTTNG_ABI_H */
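
Putting the ABI together, the ioctl groups above imply an fd hierarchy of /proc/lttng -> session -> channel/metadata -> stream/event. The walk below is a sketch: the convention that LTTNG_KERNEL_SESSION and LTTNG_KERNEL_CHANNEL return new file descriptors mirrors lttng_abi_open_stream()/lttng_abi_create_event() shown earlier, but those particular handlers are in parts of the patch not quoted here, and the channel parameters are arbitrary example values.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "lttng-abi.h"

int main(void)
{
	struct lttng_kernel_channel chan_param;
	int root_fd, session_fd, chan_fd;

	root_fd = open("/proc/lttng", O_RDWR);
	if (root_fd < 0)
		return 1;
	session_fd = ioctl(root_fd, LTTNG_KERNEL_SESSION, 0);
	if (session_fd < 0)
		return 1;

	memset(&chan_param, 0, sizeof(chan_param));
	chan_param.overwrite = 0;			/* discard mode */
	chan_param.subbuf_size = 256 * 1024;		/* bytes */
	chan_param.num_subbuf = 4;
	chan_param.switch_timer_interval = 0;		/* usecs */
	chan_param.read_timer_interval = 200000;	/* usecs */
	chan_param.output = LTTNG_KERNEL_SPLICE;
	chan_fd = ioctl(session_fd, LTTNG_KERNEL_CHANNEL, &chan_param);
	if (chan_fd < 0)
		return 1;

	if (ioctl(session_fd, LTTNG_KERNEL_SESSION_START, 0) < 0)
		return 1;
	/* ... trace for a while, then ... */
	ioctl(session_fd, LTTNG_KERNEL_SESSION_STOP, 0);
	close(chan_fd);
	close(session_fd);
	close(root_fd);
	return 0;
}

A real tool would also create a metadata channel (LTTNG_KERNEL_METADATA) and attach events before starting the session.
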
---- a/drivers/staging/lttng/lttng-calibrate.c
-+++ b/drivers/staging/lttng/lttng-calibrate.c
-@@ -1,15 +1,27 @@
- /*
- * lttng-calibrate.c
- *
-- * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * LTTng probe calibration.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
--#include "ltt-debugfs-abi.h"
--#include "ltt-events.h"
-+#include "lttng-abi.h"
-+#include "lttng-events.h"
-
- noinline
- void lttng_calibrate_kretprobe(void)
---- a/drivers/staging/lttng/lttng-context-nice.c
-+++ b/drivers/staging/lttng/lttng-context-nice.c
-@@ -1,26 +1,39 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lttng-context-nice.c
- *
- * LTTng nice context.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
--#include "ltt-events.h"
-+#include "lttng-events.h"
- #include "wrapper/ringbuffer/frontend_types.h"
- #include "wrapper/vmalloc.h"
--#include "ltt-tracer.h"
-+#include "lttng-tracer.h"
-
- static
- size_t nice_get_size(size_t offset)
- {
- size_t size = 0;
-
-- size += lib_ring_buffer_align(offset, ltt_alignof(int));
-+ size += lib_ring_buffer_align(offset, lttng_alignof(int));
- size += sizeof(int);
- return size;
- }
-@@ -28,12 +41,12 @@ size_t nice_get_size(size_t offset)
- static
- void nice_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan)
-+ struct lttng_channel *chan)
- {
- int nice;
-
- nice = task_nice(current);
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(nice));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(nice));
- chan->ops->event_write(ctx, &nice, sizeof(nice));
- }
-
-@@ -51,7 +64,7 @@ int lttng_add_nice_to_ctx(struct lttng_c
- field->event_field.name = "nice";
- field->event_field.type.atype = atype_integer;
- field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
-- field->event_field.type.u.basic.integer.alignment = ltt_alignof(int) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = lttng_alignof(int) * CHAR_BIT;
- field->event_field.type.u.basic.integer.signedness = is_signed_type(int);
- field->event_field.type.u.basic.integer.reverse_byte_order = 0;
- field->event_field.type.u.basic.integer.base = 10;
---- a/drivers/staging/lttng/lttng-context-perf-counters.c
-+++ b/drivers/staging/lttng/lttng-context-perf-counters.c
-@@ -1,10 +1,23 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lttng-context-perf-counters.c
- *
- * LTTng performance monitoring counters (perf-counters) integration module.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
-@@ -12,18 +25,18 @@
- #include <linux/perf_event.h>
- #include <linux/list.h>
- #include <linux/string.h>
--#include "ltt-events.h"
-+#include "lttng-events.h"
- #include "wrapper/ringbuffer/frontend_types.h"
- #include "wrapper/vmalloc.h"
- #include "wrapper/perf.h"
--#include "ltt-tracer.h"
-+#include "lttng-tracer.h"
-
- static
- size_t perf_counter_get_size(size_t offset)
- {
- size_t size = 0;
-
-- size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
-+ size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
- size += sizeof(uint64_t);
- return size;
- }
-@@ -31,7 +44,7 @@ size_t perf_counter_get_size(size_t offs
- static
- void perf_counter_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan)
-+ struct lttng_channel *chan)
- {
- struct perf_event *event;
- uint64_t value;
-@@ -54,7 +67,7 @@ void perf_counter_record(struct lttng_ct
- */
- value = 0;
- }
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
- chan->ops->event_write(ctx, &value, sizeof(value));
- }
-
-@@ -230,7 +243,7 @@ int lttng_add_perf_counter_to_ctx(uint32
- field->event_field.name = name_alloc;
- field->event_field.type.atype = atype_integer;
- field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
-- field->event_field.type.u.basic.integer.alignment = ltt_alignof(uint64_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
- field->event_field.type.u.basic.integer.signedness = is_signed_type(uint64_t);
- field->event_field.type.u.basic.integer.reverse_byte_order = 0;
- field->event_field.type.u.basic.integer.base = 10;
---- a/drivers/staging/lttng/lttng-context-pid.c
-+++ b/drivers/staging/lttng/lttng-context-pid.c
-@@ -1,26 +1,39 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lttng-context-pid.c
- *
- * LTTng PID context.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
--#include "ltt-events.h"
-+#include "lttng-events.h"
- #include "wrapper/ringbuffer/frontend_types.h"
- #include "wrapper/vmalloc.h"
--#include "ltt-tracer.h"
-+#include "lttng-tracer.h"
-
- static
- size_t pid_get_size(size_t offset)
- {
- size_t size = 0;
-
-- size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
- size += sizeof(pid_t);
- return size;
- }
-@@ -28,12 +41,12 @@ size_t pid_get_size(size_t offset)
- static
- void pid_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan)
-+ struct lttng_channel *chan)
- {
- pid_t pid;
-
- pid = task_tgid_nr(current);
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(pid));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(pid));
- chan->ops->event_write(ctx, &pid, sizeof(pid));
- }
-
-@@ -51,7 +64,7 @@ int lttng_add_pid_to_ctx(struct lttng_ct
- field->event_field.name = "pid";
- field->event_field.type.atype = atype_integer;
- field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-- field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
- field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
- field->event_field.type.u.basic.integer.reverse_byte_order = 0;
- field->event_field.type.u.basic.integer.base = 10;
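
Each of these per-task context providers follows the same hooks: a get_size() callback that accounts for aligned space, a record() callback that writes the value, and an lttng_add_*_to_ctx() registration function. A hypothetical cpu_id context would look roughly like this; the field name, the value source, and the preemption assumption are illustrative and not part of the patch.

#include <linux/smp.h>
#include "lttng-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "lttng-tracer.h"

static
size_t cpu_id_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(int));
	size += sizeof(int);
	return size;
}

static
void cpu_id_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	int cpu;

	/* Assumes the per-CPU ring buffer write path runs with preemption off. */
	cpu = smp_processor_id();
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu));
	chan->ops->event_write(ctx, &cpu, sizeof(cpu));
}
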
---- a/drivers/staging/lttng/lttng-context-ppid.c
-+++ b/drivers/staging/lttng/lttng-context-ppid.c
-@@ -1,27 +1,40 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lttng-context-ppid.c
- *
- * LTTng PPID context.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
- #include <linux/syscalls.h>
--#include "ltt-events.h"
-+#include "lttng-events.h"
- #include "wrapper/ringbuffer/frontend_types.h"
- #include "wrapper/vmalloc.h"
--#include "ltt-tracer.h"
-+#include "lttng-tracer.h"
-
- static
- size_t ppid_get_size(size_t offset)
- {
- size_t size = 0;
-
-- size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
- size += sizeof(pid_t);
- return size;
- }
-@@ -29,14 +42,14 @@ size_t ppid_get_size(size_t offset)
- static
- void ppid_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan)
-+ struct lttng_channel *chan)
- {
- pid_t ppid;
-
- rcu_read_lock();
- ppid = task_tgid_nr(current->real_parent);
- rcu_read_unlock();
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(ppid));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(ppid));
- chan->ops->event_write(ctx, &ppid, sizeof(ppid));
- }
-
-@@ -54,7 +67,7 @@ int lttng_add_ppid_to_ctx(struct lttng_c
- field->event_field.name = "ppid";
- field->event_field.type.atype = atype_integer;
- field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-- field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
- field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
- field->event_field.type.u.basic.integer.reverse_byte_order = 0;
- field->event_field.type.u.basic.integer.base = 10;
---- a/drivers/staging/lttng/lttng-context-prio.c
-+++ b/drivers/staging/lttng/lttng-context-prio.c
-@@ -1,20 +1,33 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lttng-context-prio.c
- *
- * LTTng priority context.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
--#include "ltt-events.h"
-+#include "lttng-events.h"
- #include "wrapper/ringbuffer/frontend_types.h"
- #include "wrapper/vmalloc.h"
- #include "wrapper/kallsyms.h"
--#include "ltt-tracer.h"
-+#include "lttng-tracer.h"
-
- static
- int (*wrapper_task_prio_sym)(struct task_struct *t);
-@@ -34,7 +47,7 @@ size_t prio_get_size(size_t offset)
- {
- size_t size = 0;
-
-- size += lib_ring_buffer_align(offset, ltt_alignof(int));
-+ size += lib_ring_buffer_align(offset, lttng_alignof(int));
- size += sizeof(int);
- return size;
- }
-@@ -42,12 +55,12 @@ size_t prio_get_size(size_t offset)
- static
- void prio_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan)
-+ struct lttng_channel *chan)
- {
- int prio;
-
- prio = wrapper_task_prio_sym(current);
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(prio));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(prio));
- chan->ops->event_write(ctx, &prio, sizeof(prio));
- }
-
-@@ -72,7 +85,7 @@ int lttng_add_prio_to_ctx(struct lttng_c
- field->event_field.name = "prio";
- field->event_field.type.atype = atype_integer;
- field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
-- field->event_field.type.u.basic.integer.alignment = ltt_alignof(int) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = lttng_alignof(int) * CHAR_BIT;
- field->event_field.type.u.basic.integer.signedness = is_signed_type(int);
- field->event_field.type.u.basic.integer.reverse_byte_order = 0;
- field->event_field.type.u.basic.integer.base = 10;
---- a/drivers/staging/lttng/lttng-context-procname.c
-+++ b/drivers/staging/lttng/lttng-context-procname.c
-@@ -1,19 +1,32 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lttng-context-procname.c
- *
- * LTTng procname context.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
--#include "ltt-events.h"
-+#include "lttng-events.h"
- #include "wrapper/ringbuffer/frontend_types.h"
- #include "wrapper/vmalloc.h"
--#include "ltt-tracer.h"
-+#include "lttng-tracer.h"
-
- static
- size_t procname_get_size(size_t offset)
-@@ -33,7 +46,7 @@ size_t procname_get_size(size_t offset)
- static
- void procname_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan)
-+ struct lttng_channel *chan)
- {
- chan->ops->event_write(ctx, current->comm, sizeof(current->comm));
- }
-@@ -53,7 +66,7 @@ int lttng_add_procname_to_ctx(struct ltt
- field->event_field.type.atype = atype_array;
- field->event_field.type.u.array.elem_type.atype = atype_integer;
- field->event_field.type.u.array.elem_type.u.basic.integer.size = sizeof(char) * CHAR_BIT;
-- field->event_field.type.u.array.elem_type.u.basic.integer.alignment = ltt_alignof(char) * CHAR_BIT;
-+ field->event_field.type.u.array.elem_type.u.basic.integer.alignment = lttng_alignof(char) * CHAR_BIT;
- field->event_field.type.u.array.elem_type.u.basic.integer.signedness = is_signed_type(char);
- field->event_field.type.u.array.elem_type.u.basic.integer.reverse_byte_order = 0;
- field->event_field.type.u.array.elem_type.u.basic.integer.base = 10;
---- a/drivers/staging/lttng/lttng-context-tid.c
-+++ b/drivers/staging/lttng/lttng-context-tid.c
-@@ -1,26 +1,39 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lttng-context-tid.c
- *
- * LTTng TID context.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
--#include "ltt-events.h"
-+#include "lttng-events.h"
- #include "wrapper/ringbuffer/frontend_types.h"
- #include "wrapper/vmalloc.h"
--#include "ltt-tracer.h"
-+#include "lttng-tracer.h"
-
- static
- size_t tid_get_size(size_t offset)
- {
- size_t size = 0;
-
-- size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
- size += sizeof(pid_t);
- return size;
- }
-@@ -28,12 +41,12 @@ size_t tid_get_size(size_t offset)
- static
- void tid_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan)
-+ struct lttng_channel *chan)
- {
- pid_t tid;
-
- tid = task_pid_nr(current);
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(tid));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(tid));
- chan->ops->event_write(ctx, &tid, sizeof(tid));
- }
-
-@@ -51,7 +64,7 @@ int lttng_add_tid_to_ctx(struct lttng_ct
- field->event_field.name = "tid";
- field->event_field.type.atype = atype_integer;
- field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-- field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
- field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
- field->event_field.type.u.basic.integer.reverse_byte_order = 0;
- field->event_field.type.u.basic.integer.base = 10;
---- a/drivers/staging/lttng/lttng-context-vpid.c
-+++ b/drivers/staging/lttng/lttng-context-vpid.c
-@@ -1,26 +1,39 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lttng-context-vpid.c
- *
- * LTTng vPID context.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
--#include "ltt-events.h"
-+#include "lttng-events.h"
- #include "wrapper/ringbuffer/frontend_types.h"
- #include "wrapper/vmalloc.h"
--#include "ltt-tracer.h"
-+#include "lttng-tracer.h"
-
- static
- size_t vpid_get_size(size_t offset)
- {
- size_t size = 0;
-
-- size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
- size += sizeof(pid_t);
- return size;
- }
-@@ -28,7 +41,7 @@ size_t vpid_get_size(size_t offset)
- static
- void vpid_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan)
-+ struct lttng_channel *chan)
- {
- pid_t vpid;
-
-@@ -39,7 +52,7 @@ void vpid_record(struct lttng_ctx_field
- vpid = 0;
- else
- vpid = task_tgid_vnr(current);
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(vpid));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(vpid));
- chan->ops->event_write(ctx, &vpid, sizeof(vpid));
- }
-
-@@ -57,7 +70,7 @@ int lttng_add_vpid_to_ctx(struct lttng_c
- field->event_field.name = "vpid";
- field->event_field.type.atype = atype_integer;
- field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-- field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
- field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
- field->event_field.type.u.basic.integer.reverse_byte_order = 0;
- field->event_field.type.u.basic.integer.base = 10;
---- a/drivers/staging/lttng/lttng-context-vppid.c
-+++ b/drivers/staging/lttng/lttng-context-vppid.c
-@@ -1,27 +1,40 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lttng-context-vppid.c
- *
- * LTTng vPPID context.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
- #include <linux/syscalls.h>
--#include "ltt-events.h"
-+#include "lttng-events.h"
- #include "wrapper/ringbuffer/frontend_types.h"
- #include "wrapper/vmalloc.h"
--#include "ltt-tracer.h"
-+#include "lttng-tracer.h"
-
- static
- size_t vppid_get_size(size_t offset)
- {
- size_t size = 0;
-
-- size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
- size += sizeof(pid_t);
- return size;
- }
-@@ -29,7 +42,7 @@ size_t vppid_get_size(size_t offset)
- static
- void vppid_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan)
-+ struct lttng_channel *chan)
- {
- struct task_struct *parent;
- pid_t vppid;
-@@ -44,7 +57,7 @@ void vppid_record(struct lttng_ctx_field
- else
- vppid = task_tgid_vnr(parent);
- rcu_read_unlock();
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(vppid));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(vppid));
- chan->ops->event_write(ctx, &vppid, sizeof(vppid));
- }
-
-@@ -62,7 +75,7 @@ int lttng_add_vppid_to_ctx(struct lttng_
- field->event_field.name = "vppid";
- field->event_field.type.atype = atype_integer;
- field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-- field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
- field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
- field->event_field.type.u.basic.integer.reverse_byte_order = 0;
- field->event_field.type.u.basic.integer.base = 10;
---- a/drivers/staging/lttng/lttng-context-vtid.c
-+++ b/drivers/staging/lttng/lttng-context-vtid.c
-@@ -1,26 +1,39 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * lttng-context-vtid.c
- *
- * LTTng vTID context.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
--#include "ltt-events.h"
-+#include "lttng-events.h"
- #include "wrapper/ringbuffer/frontend_types.h"
- #include "wrapper/vmalloc.h"
--#include "ltt-tracer.h"
-+#include "lttng-tracer.h"
-
- static
- size_t vtid_get_size(size_t offset)
- {
- size_t size = 0;
-
-- size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
-+ size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
- size += sizeof(pid_t);
- return size;
- }
-@@ -28,7 +41,7 @@ size_t vtid_get_size(size_t offset)
- static
- void vtid_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
-- struct ltt_channel *chan)
-+ struct lttng_channel *chan)
- {
- pid_t vtid;
-
-@@ -39,7 +52,7 @@ void vtid_record(struct lttng_ctx_field
- vtid = 0;
- else
- vtid = task_pid_vnr(current);
-- lib_ring_buffer_align_ctx(ctx, ltt_alignof(vtid));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(vtid));
- chan->ops->event_write(ctx, &vtid, sizeof(vtid));
- }
-
-@@ -57,7 +70,7 @@ int lttng_add_vtid_to_ctx(struct lttng_c
- field->event_field.name = "vtid";
- field->event_field.type.atype = atype_integer;
- field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
-- field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
-+ field->event_field.type.u.basic.integer.alignment = lttng_alignof(pid_t) * CHAR_BIT;
- field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
- field->event_field.type.u.basic.integer.reverse_byte_order = 0;
- field->event_field.type.u.basic.integer.base = 10;
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-context.c
-@@ -0,0 +1,105 @@
-+/*
-+ * lttng-context.c
-+ *
-+ * LTTng trace/channel/event context management.
-+ *
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/list.h>
-+#include <linux/mutex.h>
-+#include <linux/slab.h>
-+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "lttng-events.h"
-+#include "lttng-tracer.h"
-+
-+int lttng_find_context(struct lttng_ctx *ctx, const char *name)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < ctx->nr_fields; i++) {
-+ /* Skip allocated (but non-initialized) contexts */
-+ if (!ctx->fields[i].event_field.name)
-+ continue;
-+ if (!strcmp(ctx->fields[i].event_field.name, name))
-+ return 1;
-+ }
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(lttng_find_context);
-+
-+/*
-+ * Note: as we append context information, the pointer location may change.
-+ */
-+struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
-+{
-+ struct lttng_ctx_field *field;
-+ struct lttng_ctx *ctx;
-+
-+ if (!*ctx_p) {
-+ *ctx_p = kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
-+ if (!*ctx_p)
-+ return NULL;
-+ }
-+ ctx = *ctx_p;
-+ if (ctx->nr_fields + 1 > ctx->allocated_fields) {
-+ struct lttng_ctx_field *new_fields;
-+
-+ ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
-+ new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
-+ if (!new_fields)
-+ return NULL;
-+ if (ctx->fields)
-+ memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
-+ kfree(ctx->fields);
-+ ctx->fields = new_fields;
-+ }
-+ field = &ctx->fields[ctx->nr_fields];
-+ ctx->nr_fields++;
-+ return field;
-+}
-+EXPORT_SYMBOL_GPL(lttng_append_context);
-+
-+/*
-+ * Remove last context field.
-+ */
-+void lttng_remove_context_field(struct lttng_ctx **ctx_p,
-+ struct lttng_ctx_field *field)
-+{
-+ struct lttng_ctx *ctx;
-+
-+ ctx = *ctx_p;
-+ ctx->nr_fields--;
-+ WARN_ON_ONCE(&ctx->fields[ctx->nr_fields] != field);
-+ memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
-+}
-+EXPORT_SYMBOL_GPL(lttng_remove_context_field);
-+
-+void lttng_destroy_context(struct lttng_ctx *ctx)
-+{
-+ int i;
-+
-+ if (!ctx)
-+ return;
-+ for (i = 0; i < ctx->nr_fields; i++) {
-+ if (ctx->fields[i].destroy)
-+ ctx->fields[i].destroy(&ctx->fields[i]);
-+ }
-+ kfree(ctx->fields);
-+ kfree(ctx);
-+}
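
A provider such as the hypothetical cpu_id context sketched earlier would wire these helpers together roughly as follows; the duplicate-check ordering and the get_size/record member names are inferred from the providers above, whose registration bodies are only partially visible in this patch.

int lttng_add_cpu_id_to_ctx(struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;

	field = lttng_append_context(ctx);
	if (!field)
		return -ENOMEM;
	if (lttng_find_context(*ctx, "cpu_id")) {
		lttng_remove_context_field(ctx, field);
		return -EEXIST;
	}
	field->event_field.name = "cpu_id";
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = lttng_alignof(int) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = is_signed_type(int);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = cpu_id_get_size;
	field->record = cpu_id_record;
	wrapper_vmalloc_sync_all();
	return 0;
}

As the comment above lttng_append_context() warns, appending may reallocate the field array, so callers must use the returned field pointer and never cache field pointers across appends.
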
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-endian.h
-@@ -0,0 +1,43 @@
-+#ifndef _LTTNG_ENDIAN_H
-+#define _LTTNG_ENDIAN_H
-+
-+/*
-+ * lttng-endian.h
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#ifdef __KERNEL__
-+# include <asm/byteorder.h>
-+# ifdef __BIG_ENDIAN
-+# define __BYTE_ORDER __BIG_ENDIAN
-+# elif defined(__LITTLE_ENDIAN)
-+# define __BYTE_ORDER __LITTLE_ENDIAN
-+# else
-+# error "unknown endianness"
-+# endif
-+#ifndef __BIG_ENDIAN
-+# define __BIG_ENDIAN 4321
-+#endif
-+#ifndef __LITTLE_ENDIAN
-+# define __LITTLE_ENDIAN 1234
-+#endif
-+#else
-+# include <endian.h>
-+#endif
-+
-+#endif /* _LTTNG_ENDIAN_H */
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-events.c
-@@ -0,0 +1,1126 @@
-+/*
-+ * lttng-events.c
-+ *
-+ * Holds LTTng per-session event registry.
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/list.h>
-+#include <linux/mutex.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/jiffies.h>
-+#include <linux/utsname.h>
-+#include "wrapper/uuid.h"
-+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "wrapper/random.h"
-+#include "lttng-events.h"
-+#include "lttng-tracer.h"
-+
-+static LIST_HEAD(sessions);
-+static LIST_HEAD(lttng_transport_list);
-+static DEFINE_MUTEX(sessions_mutex);
-+static struct kmem_cache *event_cache;
-+
-+static void _lttng_event_destroy(struct lttng_event *event);
-+static void _lttng_channel_destroy(struct lttng_channel *chan);
-+static int _lttng_event_unregister(struct lttng_event *event);
-+static
-+int _lttng_event_metadata_statedump(struct lttng_session *session,
-+ struct lttng_channel *chan,
-+ struct lttng_event *event);
-+static
-+int _lttng_session_metadata_statedump(struct lttng_session *session);
-+
-+void synchronize_trace(void)
-+{
-+ synchronize_sched();
-+#ifdef CONFIG_PREEMPT_RT
-+ synchronize_rcu();
-+#endif
-+}
-+
-+struct lttng_session *lttng_session_create(void)
-+{
-+ struct lttng_session *session;
-+
-+ mutex_lock(&sessions_mutex);
-+ session = kzalloc(sizeof(struct lttng_session), GFP_KERNEL);
-+ if (!session)
-+ return NULL;
-+ INIT_LIST_HEAD(&session->chan);
-+ INIT_LIST_HEAD(&session->events);
-+ uuid_le_gen(&session->uuid);
-+ list_add(&session->list, &sessions);
-+ mutex_unlock(&sessions_mutex);
-+ return session;
-+}
-+
-+void lttng_session_destroy(struct lttng_session *session)
-+{
-+ struct lttng_channel *chan, *tmpchan;
-+ struct lttng_event *event, *tmpevent;
-+ int ret;
-+
-+ mutex_lock(&sessions_mutex);
-+ ACCESS_ONCE(session->active) = 0;
-+ list_for_each_entry(chan, &session->chan, list) {
-+ ret = lttng_syscalls_unregister(chan);
-+ WARN_ON(ret);
-+ }
-+ list_for_each_entry(event, &session->events, list) {
-+ ret = _lttng_event_unregister(event);
-+ WARN_ON(ret);
-+ }
-+ synchronize_trace(); /* Wait for in-flight events to complete */
-+ list_for_each_entry_safe(event, tmpevent, &session->events, list)
-+ _lttng_event_destroy(event);
-+ list_for_each_entry_safe(chan, tmpchan, &session->chan, list)
-+ _lttng_channel_destroy(chan);
-+ list_del(&session->list);
-+ mutex_unlock(&sessions_mutex);
-+ kfree(session);
-+}
-+
-+int lttng_session_enable(struct lttng_session *session)
-+{
-+ int ret = 0;
-+ struct lttng_channel *chan;
-+
-+ mutex_lock(&sessions_mutex);
-+ if (session->active) {
-+ ret = -EBUSY;
-+ goto end;
-+ }
-+
-+ /*
-+ * Snapshot the number of events per channel to know the type of header
-+ * we need to use.
-+ */
-+ list_for_each_entry(chan, &session->chan, list) {
-+ if (chan->header_type)
-+ continue; /* don't change it if session stop/restart */
-+ if (chan->free_event_id < 31)
-+ chan->header_type = 1; /* compact */
-+ else
-+ chan->header_type = 2; /* large */
-+ }
-+
-+ ACCESS_ONCE(session->active) = 1;
-+ ACCESS_ONCE(session->been_active) = 1;
-+ ret = _lttng_session_metadata_statedump(session);
-+ if (ret) {
-+ ACCESS_ONCE(session->active) = 0;
-+ goto end;
-+ }
-+ ret = lttng_statedump_start(session);
-+ if (ret)
-+ ACCESS_ONCE(session->active) = 0;
-+end:
-+ mutex_unlock(&sessions_mutex);
-+ return ret;
-+}
-+
-+int lttng_session_disable(struct lttng_session *session)
-+{
-+ int ret = 0;
-+
-+ mutex_lock(&sessions_mutex);
-+ if (!session->active) {
-+ ret = -EBUSY;
-+ goto end;
-+ }
-+ ACCESS_ONCE(session->active) = 0;
-+end:
-+ mutex_unlock(&sessions_mutex);
-+ return ret;
-+}
-+
-+int lttng_channel_enable(struct lttng_channel *channel)
-+{
-+ int old;
-+
-+ if (channel == channel->session->metadata)
-+ return -EPERM;
-+ old = xchg(&channel->enabled, 1);
-+ if (old)
-+ return -EEXIST;
-+ return 0;
-+}
-+
-+int lttng_channel_disable(struct lttng_channel *channel)
-+{
-+ int old;
-+
-+ if (channel == channel->session->metadata)
-+ return -EPERM;
-+ old = xchg(&channel->enabled, 0);
-+ if (!old)
-+ return -EEXIST;
-+ return 0;
-+}
-+
-+int lttng_event_enable(struct lttng_event *event)
-+{
-+ int old;
-+
-+ if (event->chan == event->chan->session->metadata)
-+ return -EPERM;
-+ old = xchg(&event->enabled, 1);
-+ if (old)
-+ return -EEXIST;
-+ return 0;
-+}
-+
-+int lttng_event_disable(struct lttng_event *event)
-+{
-+ int old;
-+
-+ if (event->chan == event->chan->session->metadata)
-+ return -EPERM;
-+ old = xchg(&event->enabled, 0);
-+ if (!old)
-+ return -EEXIST;
-+ return 0;
-+}
-+
-+static struct lttng_transport *lttng_transport_find(const char *name)
-+{
-+ struct lttng_transport *transport;
-+
-+ list_for_each_entry(transport, &lttng_transport_list, node) {
-+ if (!strcmp(transport->name, name))
-+ return transport;
-+ }
-+ return NULL;
-+}
-+
-+struct lttng_channel *lttng_channel_create(struct lttng_session *session,
-+ const char *transport_name,
-+ void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval)
-+{
-+ struct lttng_channel *chan;
-+ struct lttng_transport *transport = NULL;
-+
-+ mutex_lock(&sessions_mutex);
-+ if (session->been_active)
-+ goto active; /* Refuse to add channel to active session */
-+ transport = lttng_transport_find(transport_name);
-+ if (!transport) {
-+ printk(KERN_WARNING "LTTng transport %s not found\n",
-+ transport_name);
-+ goto notransport;
-+ }
-+ if (!try_module_get(transport->owner)) {
-+ printk(KERN_WARNING "LTT : Can't lock transport module.\n");
-+ goto notransport;
-+ }
-+ chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
-+ if (!chan)
-+ goto nomem;
-+ chan->session = session;
-+ chan->id = session->free_chan_id++;
-+ /*
-+ * Note: the channel creation op already writes into the packet
-+ * headers. Therefore the "chan" information used as input
-+ * should be already accessible.
-+ */
-+ chan->chan = transport->ops.channel_create(transport_name,
-+ chan, buf_addr, subbuf_size, num_subbuf,
-+ switch_timer_interval, read_timer_interval);
-+ if (!chan->chan)
-+ goto create_error;
-+ chan->enabled = 1;
-+ chan->ops = &transport->ops;
-+ chan->transport = transport;
-+ list_add(&chan->list, &session->chan);
-+ mutex_unlock(&sessions_mutex);
-+ return chan;
-+
-+create_error:
-+ kfree(chan);
-+nomem:
-+ if (transport)
-+ module_put(transport->owner);
-+notransport:
-+active:
-+ mutex_unlock(&sessions_mutex);
-+ return NULL;
-+}
-+
-+/*
-+ * Only used internally at session destruction.
-+ */
-+static
-+void _lttng_channel_destroy(struct lttng_channel *chan)
-+{
-+ chan->ops->channel_destroy(chan->chan);
-+ module_put(chan->transport->owner);
-+ list_del(&chan->list);
-+ lttng_destroy_context(chan->ctx);
-+ kfree(chan);
-+}
-+
-+/*
-+ * Supports event creation while tracing session is active.
-+ */
-+struct lttng_event *lttng_event_create(struct lttng_channel *chan,
-+ struct lttng_kernel_event *event_param,
-+ void *filter,
-+ const struct lttng_event_desc *internal_desc)
-+{
-+ struct lttng_event *event;
-+ int ret;
-+
-+ mutex_lock(&sessions_mutex);
-+ if (chan->free_event_id == -1UL)
-+ goto full;
-+ /*
-+ * This is O(n^2) (for each event, the loop is called at event
-+ * creation). Might require a hash if we have lots of events.
-+ */
-+ list_for_each_entry(event, &chan->session->events, list)
-+ if (!strcmp(event->desc->name, event_param->name))
-+ goto exist;
-+ event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
-+ if (!event)
-+ goto cache_error;
-+ event->chan = chan;
-+ event->filter = filter;
-+ event->id = chan->free_event_id++;
-+ event->enabled = 1;
-+ event->instrumentation = event_param->instrumentation;
-+ /* Populate lttng_event structure before tracepoint registration. */
-+ smp_wmb();
-+ switch (event_param->instrumentation) {
-+ case LTTNG_KERNEL_TRACEPOINT:
-+ event->desc = lttng_event_get(event_param->name);
-+ if (!event->desc)
-+ goto register_error;
-+ ret = tracepoint_probe_register(event_param->name,
-+ event->desc->probe_callback,
-+ event);
-+ if (ret)
-+ goto register_error;
-+ break;
-+ case LTTNG_KERNEL_KPROBE:
-+ ret = lttng_kprobes_register(event_param->name,
-+ event_param->u.kprobe.symbol_name,
-+ event_param->u.kprobe.offset,
-+ event_param->u.kprobe.addr,
-+ event);
-+ if (ret)
-+ goto register_error;
-+ ret = try_module_get(event->desc->owner);
-+ WARN_ON_ONCE(!ret);
-+ break;
-+ case LTTNG_KERNEL_KRETPROBE:
-+ {
-+ struct lttng_event *event_return;
-+
-+ /* kretprobe defines 2 events */
-+ event_return =
-+ kmem_cache_zalloc(event_cache, GFP_KERNEL);
-+ if (!event_return)
-+ goto register_error;
-+ event_return->chan = chan;
-+ event_return->filter = filter;
-+ event_return->id = chan->free_event_id++;
-+ event_return->enabled = 1;
-+ event_return->instrumentation = event_param->instrumentation;
-+ /*
-+ * Populate lttng_event structure before kretprobe registration.
-+ */
-+ smp_wmb();
-+ ret = lttng_kretprobes_register(event_param->name,
-+ event_param->u.kretprobe.symbol_name,
-+ event_param->u.kretprobe.offset,
-+ event_param->u.kretprobe.addr,
-+ event, event_return);
-+ if (ret) {
-+ kmem_cache_free(event_cache, event_return);
-+ goto register_error;
-+ }
-+ /* Take 2 refs on the module: one per event. */
-+ ret = try_module_get(event->desc->owner);
-+ WARN_ON_ONCE(!ret);
-+ ret = try_module_get(event->desc->owner);
-+ WARN_ON_ONCE(!ret);
-+ ret = _lttng_event_metadata_statedump(chan->session, chan,
-+ event_return);
-+ if (ret) {
-+ kmem_cache_free(event_cache, event_return);
-+ module_put(event->desc->owner);
-+ module_put(event->desc->owner);
-+ goto statedump_error;
-+ }
-+ list_add(&event_return->list, &chan->session->events);
-+ break;
-+ }
-+ case LTTNG_KERNEL_FUNCTION:
-+ ret = lttng_ftrace_register(event_param->name,
-+ event_param->u.ftrace.symbol_name,
-+ event);
-+ if (ret)
-+ goto register_error;
-+ ret = try_module_get(event->desc->owner);
-+ WARN_ON_ONCE(!ret);
-+ break;
-+ case LTTNG_KERNEL_NOOP:
-+ event->desc = internal_desc;
-+ if (!event->desc)
-+ goto register_error;
-+ break;
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+ ret = _lttng_event_metadata_statedump(chan->session, chan, event);
-+ if (ret)
-+ goto statedump_error;
-+ list_add(&event->list, &chan->session->events);
-+ mutex_unlock(&sessions_mutex);
-+ return event;
-+
-+statedump_error:
-+ /* If a statedump error occurs, events will not be readable. */
-+register_error:
-+ kmem_cache_free(event_cache, event);
-+cache_error:
-+exist:
-+full:
-+ mutex_unlock(&sessions_mutex);
-+ return NULL;
-+}
-+
-+/*
-+ * Only used internally at session destruction.
-+ */
-+int _lttng_event_unregister(struct lttng_event *event)
-+{
-+ int ret = -EINVAL;
-+
-+ switch (event->instrumentation) {
-+ case LTTNG_KERNEL_TRACEPOINT:
-+ ret = tracepoint_probe_unregister(event->desc->name,
-+ event->desc->probe_callback,
-+ event);
-+ if (ret)
-+ return ret;
-+ break;
-+ case LTTNG_KERNEL_KPROBE:
-+ lttng_kprobes_unregister(event);
-+ ret = 0;
-+ break;
-+ case LTTNG_KERNEL_KRETPROBE:
-+ lttng_kretprobes_unregister(event);
-+ ret = 0;
-+ break;
-+ case LTTNG_KERNEL_FUNCTION:
-+ lttng_ftrace_unregister(event);
-+ ret = 0;
-+ break;
-+ case LTTNG_KERNEL_NOOP:
-+ ret = 0;
-+ break;
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+ return ret;
-+}
-+
-+/*
-+ * Only used internally at session destruction.
-+ */
-+static
-+void _lttng_event_destroy(struct lttng_event *event)
-+{
-+ switch (event->instrumentation) {
-+ case LTTNG_KERNEL_TRACEPOINT:
-+ lttng_event_put(event->desc);
-+ break;
-+ case LTTNG_KERNEL_KPROBE:
-+ module_put(event->desc->owner);
-+ lttng_kprobes_destroy_private(event);
-+ break;
-+ case LTTNG_KERNEL_KRETPROBE:
-+ module_put(event->desc->owner);
-+ lttng_kretprobes_destroy_private(event);
-+ break;
-+ case LTTNG_KERNEL_FUNCTION:
-+ module_put(event->desc->owner);
-+ lttng_ftrace_destroy_private(event);
-+ break;
-+ case LTTNG_KERNEL_NOOP:
-+ break;
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+ list_del(&event->list);
-+ lttng_destroy_context(event->ctx);
-+ kmem_cache_free(event_cache, event);
-+}
-+
-+/*
-+ * We have exclusive access to our metadata buffer (protected by the
-+ * sessions_mutex), so we can do racy operations such as looking for
-+ * remaining space left in packet and write, since mutual exclusion
-+ * protects us from concurrent writes.
-+ */
-+int lttng_metadata_printf(struct lttng_session *session,
-+ const char *fmt, ...)
-+{
-+ struct lib_ring_buffer_ctx ctx;
-+ struct lttng_channel *chan = session->metadata;
-+ char *str;
-+ int ret = 0, waitret;
-+ size_t len, reserve_len, pos;
-+ va_list ap;
-+
-+ WARN_ON_ONCE(!ACCESS_ONCE(session->active));
-+
-+ va_start(ap, fmt);
-+ str = kvasprintf(GFP_KERNEL, fmt, ap);
-+ va_end(ap);
-+ if (!str)
-+ return -ENOMEM;
-+
-+ len = strlen(str);
-+ pos = 0;
-+
-+ for (pos = 0; pos < len; pos += reserve_len) {
-+ reserve_len = min_t(size_t,
-+ chan->ops->packet_avail_size(chan->chan),
-+ len - pos);
-+ lib_ring_buffer_ctx_init(&ctx, chan->chan, NULL, reserve_len,
-+ sizeof(char), -1);
-+ /*
-+ * We don't care about metadata buffer's records lost
-+ * count, because we always retry here. Report error if
-+ * we need to bail out after timeout or being
-+ * interrupted.
-+ */
-+ waitret = wait_event_interruptible_timeout(*chan->ops->get_writer_buf_wait_queue(chan->chan, -1),
-+ ({
-+ ret = chan->ops->event_reserve(&ctx, 0);
-+ ret != -ENOBUFS || !ret;
-+ }),
-+ msecs_to_jiffies(LTTNG_METADATA_TIMEOUT_MSEC));
-+ if (!waitret || waitret == -ERESTARTSYS || ret) {
-+ printk(KERN_WARNING "LTTng: Failure to write metadata to buffers (%s)\n",
-+ waitret == -ERESTARTSYS ? "interrupted" :
-+ (ret == -ENOBUFS ? "timeout" : "I/O error"));
-+ if (waitret == -ERESTARTSYS)
-+ ret = waitret;
-+ goto end;
-+ }
-+ chan->ops->event_write(&ctx, &str[pos], reserve_len);
-+ chan->ops->event_commit(&ctx);
-+ }
-+end:
-+ kfree(str);
-+ return ret;
-+}
-+
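-+/*
-+ * For illustration, a caller emits CTF declarations through this helper
-+ * and checks the return value, as the statedump functions below do, e.g.:
-+ *
-+ *	ret = lttng_metadata_printf(session,
-+ *		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n",
-+ *		lttng_alignof(uint32_t) * CHAR_BIT);
-+ *	if (ret)
-+ *		goto end;
-+ *
-+ * The call can sleep (GFP_KERNEL allocation and interruptible wait) until
-+ * packet space becomes available in the metadata channel.
-+ */
-+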
-+static
-+int _lttng_field_statedump(struct lttng_session *session,
-+ const struct lttng_event_field *field)
-+{
-+ int ret = 0;
-+
-+ switch (field->type.atype) {
-+ case atype_integer:
-+ ret = lttng_metadata_printf(session,
-+ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
-+ field->type.u.basic.integer.size,
-+ field->type.u.basic.integer.alignment,
-+ field->type.u.basic.integer.signedness,
-+ (field->type.u.basic.integer.encoding == lttng_encode_none)
-+ ? "none"
-+ : (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
-+ ? "UTF8"
-+ : "ASCII",
-+ field->type.u.basic.integer.base,
-+#ifdef __BIG_ENDIAN
-+ field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
-+#else
-+ field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
-+#endif
-+ field->name);
-+ break;
-+ case atype_enum:
-+ ret = lttng_metadata_printf(session,
-+ " %s _%s;\n",
-+ field->type.u.basic.enumeration.name,
-+ field->name);
-+ break;
-+ case atype_array:
-+ {
-+ const struct lttng_basic_type *elem_type;
-+
-+ elem_type = &field->type.u.array.elem_type;
-+ ret = lttng_metadata_printf(session,
-+ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
-+ elem_type->u.basic.integer.size,
-+ elem_type->u.basic.integer.alignment,
-+ elem_type->u.basic.integer.signedness,
-+ (elem_type->u.basic.integer.encoding == lttng_encode_none)
-+ ? "none"
-+ : (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
-+ ? "UTF8"
-+ : "ASCII",
-+ elem_type->u.basic.integer.base,
-+#ifdef __BIG_ENDIAN
-+ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
-+#else
-+ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
-+#endif
-+ field->name, field->type.u.array.length);
-+ break;
-+ }
-+ case atype_sequence:
-+ {
-+ const struct lttng_basic_type *elem_type;
-+ const struct lttng_basic_type *length_type;
-+
-+ elem_type = &field->type.u.sequence.elem_type;
-+ length_type = &field->type.u.sequence.length_type;
-+ ret = lttng_metadata_printf(session,
-+ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
-+ length_type->u.basic.integer.size,
-+ (unsigned int) length_type->u.basic.integer.alignment,
-+ length_type->u.basic.integer.signedness,
-+ (length_type->u.basic.integer.encoding == lttng_encode_none)
-+ ? "none"
-+ : ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
-+ ? "UTF8"
-+ : "ASCII"),
-+ length_type->u.basic.integer.base,
-+#ifdef __BIG_ENDIAN
-+ length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
-+#else
-+ length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
-+#endif
-+ field->name);
-+ if (ret)
-+ return ret;
-+
-+ ret = lttng_metadata_printf(session,
-+ " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
-+ elem_type->u.basic.integer.size,
-+ (unsigned int) elem_type->u.basic.integer.alignment,
-+ elem_type->u.basic.integer.signedness,
-+ (elem_type->u.basic.integer.encoding == lttng_encode_none)
-+ ? "none"
-+ : ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
-+ ? "UTF8"
-+ : "ASCII"),
-+ elem_type->u.basic.integer.base,
-+#ifdef __BIG_ENDIAN
-+ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
-+#else
-+ elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
-+#endif
-+ field->name,
-+ field->name);
-+ break;
-+ }
-+
-+ case atype_string:
-+ /* Default encoding is UTF8 */
-+ ret = lttng_metadata_printf(session,
-+ " string%s _%s;\n",
-+ field->type.u.basic.string.encoding == lttng_encode_ASCII ?
-+ " { encoding = ASCII; }" : "",
-+ field->name);
-+ break;
-+ default:
-+ WARN_ON_ONCE(1);
-+ return -EINVAL;
-+ }
-+ return ret;
-+}
-+
-+static
-+int _lttng_context_metadata_statedump(struct lttng_session *session,
-+ struct lttng_ctx *ctx)
-+{
-+ int ret = 0;
-+ int i;
-+
-+ if (!ctx)
-+ return 0;
-+ for (i = 0; i < ctx->nr_fields; i++) {
-+ const struct lttng_ctx_field *field = &ctx->fields[i];
-+
-+ ret = _lttng_field_statedump(session, &field->event_field);
-+ if (ret)
-+ return ret;
-+ }
-+ return ret;
-+}
-+
-+static
-+int _lttng_fields_metadata_statedump(struct lttng_session *session,
-+ struct lttng_event *event)
-+{
-+ const struct lttng_event_desc *desc = event->desc;
-+ int ret = 0;
-+ int i;
-+
-+ for (i = 0; i < desc->nr_fields; i++) {
-+ const struct lttng_event_field *field = &desc->fields[i];
-+
-+ ret = _lttng_field_statedump(session, field);
-+ if (ret)
-+ return ret;
-+ }
-+ return ret;
-+}
-+
-+static
-+int _lttng_event_metadata_statedump(struct lttng_session *session,
-+ struct lttng_channel *chan,
-+ struct lttng_event *event)
-+{
-+ int ret = 0;
-+
-+ if (event->metadata_dumped || !ACCESS_ONCE(session->active))
-+ return 0;
-+ if (chan == session->metadata)
-+ return 0;
-+
-+ ret = lttng_metadata_printf(session,
-+ "event {\n"
-+ " name = %s;\n"
-+ " id = %u;\n"
-+ " stream_id = %u;\n",
-+ event->desc->name,
-+ event->id,
-+ event->chan->id);
-+ if (ret)
-+ goto end;
-+
-+ if (event->ctx) {
-+ ret = lttng_metadata_printf(session,
-+ " context := struct {\n");
-+ if (ret)
-+ goto end;
-+ }
-+ ret = _lttng_context_metadata_statedump(session, event->ctx);
-+ if (ret)
-+ goto end;
-+ if (event->ctx) {
-+ ret = lttng_metadata_printf(session,
-+ " };\n");
-+ if (ret)
-+ goto end;
-+ }
-+
-+ ret = lttng_metadata_printf(session,
-+ " fields := struct {\n"
-+ );
-+ if (ret)
-+ goto end;
-+
-+ ret = _lttng_fields_metadata_statedump(session, event);
-+ if (ret)
-+ goto end;
-+
-+ /*
-+ * LTTng space reservation can only reserve multiples of the
-+ * byte size.
-+ */
-+ ret = lttng_metadata_printf(session,
-+ " };\n"
-+ "};\n\n");
-+ if (ret)
-+ goto end;
-+
-+ event->metadata_dumped = 1;
-+end:
-+ return ret;
-+
-+}
-+
-+static
-+int _lttng_channel_metadata_statedump(struct lttng_session *session,
-+ struct lttng_channel *chan)
-+{
-+ int ret = 0;
-+
-+ if (chan->metadata_dumped || !ACCESS_ONCE(session->active))
-+ return 0;
-+ if (chan == session->metadata)
-+ return 0;
-+
-+ WARN_ON_ONCE(!chan->header_type);
-+ ret = lttng_metadata_printf(session,
-+ "stream {\n"
-+ " id = %u;\n"
-+ " event.header := %s;\n"
-+ " packet.context := struct packet_context;\n",
-+ chan->id,
-+ chan->header_type == 1 ? "struct event_header_compact" :
-+ "struct event_header_large");
-+ if (ret)
-+ goto end;
-+
-+ if (chan->ctx) {
-+ ret = lttng_metadata_printf(session,
-+ " event.context := struct {\n");
-+ if (ret)
-+ goto end;
-+ }
-+ ret = _lttng_context_metadata_statedump(session, chan->ctx);
-+ if (ret)
-+ goto end;
-+ if (chan->ctx) {
-+ ret = lttng_metadata_printf(session,
-+ " };\n");
-+ if (ret)
-+ goto end;
-+ }
-+
-+ ret = lttng_metadata_printf(session,
-+ "};\n\n");
-+
-+ chan->metadata_dumped = 1;
-+end:
-+ return ret;
-+}
-+
-+static
-+int _lttng_stream_packet_context_declare(struct lttng_session *session)
-+{
-+ return lttng_metadata_printf(session,
-+ "struct packet_context {\n"
-+ " uint64_clock_monotonic_t timestamp_begin;\n"
-+ " uint64_clock_monotonic_t timestamp_end;\n"
-+ " uint32_t events_discarded;\n"
-+ " uint32_t content_size;\n"
-+ " uint32_t packet_size;\n"
-+ " uint32_t cpu_id;\n"
-+ "};\n\n"
-+ );
-+}
-+
-+/*
-+ * Compact header:
-+ * id: range: 0 - 30.
-+ * id 31 is reserved to indicate an extended header.
-+ *
-+ * Large header:
-+ * id: range: 0 - 65534.
-+ * id 65535 is reserved to indicate an extended header.
-+ */
-+static
-+int _lttng_event_header_declare(struct lttng_session *session)
-+{
-+ return lttng_metadata_printf(session,
-+ "struct event_header_compact {\n"
-+ " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
-+ " variant <id> {\n"
-+ " struct {\n"
-+ " uint27_clock_monotonic_t timestamp;\n"
-+ " } compact;\n"
-+ " struct {\n"
-+ " uint32_t id;\n"
-+ " uint64_clock_monotonic_t timestamp;\n"
-+ " } extended;\n"
-+ " } v;\n"
-+ "} align(%u);\n"
-+ "\n"
-+ "struct event_header_large {\n"
-+ " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
-+ " variant <id> {\n"
-+ " struct {\n"
-+ " uint32_clock_monotonic_t timestamp;\n"
-+ " } compact;\n"
-+ " struct {\n"
-+ " uint32_t id;\n"
-+ " uint64_clock_monotonic_t timestamp;\n"
-+ " } extended;\n"
-+ " } v;\n"
-+ "} align(%u);\n\n",
-+ lttng_alignof(uint32_t) * CHAR_BIT,
-+ lttng_alignof(uint16_t) * CHAR_BIT
-+ );
-+}
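-+
-+/*
-+ * These ranges are mirrored at reservation time: the ring buffer clients
-+ * (see lttng-ring-buffer-client.h in this patch) set LTTNG_RFLAG_EXTENDED
-+ * when the event id exceeds 30 (compact header) or 65534 (large header),
-+ * which makes lttng_write_event_header_slow() emit the extended variant
-+ * declared above.
-+ */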
-+
-+/*
-+ * Approximation of NTP time of day to clock monotonic correlation,
-+ * taken at start of trace.
-+ * Yes, this is only an approximation. Yes, we can (and will) do better
-+ * in future versions.
-+ */
-+static
-+uint64_t measure_clock_offset(void)
-+{
-+ uint64_t offset, monotonic[2], realtime;
-+ struct timespec rts = { 0, 0 };
-+ unsigned long flags;
-+
-+ /* Disable interrupts to increase correlation precision. */
-+ local_irq_save(flags);
-+ monotonic[0] = trace_clock_read64();
-+ getnstimeofday(&rts);
-+ monotonic[1] = trace_clock_read64();
-+ local_irq_restore(flags);
-+
-+ offset = (monotonic[0] + monotonic[1]) >> 1;
-+ realtime = (uint64_t) rts.tv_sec * NSEC_PER_SEC;
-+ realtime += rts.tv_nsec;
-+ offset = realtime - offset;
-+ return offset;
-+}
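-+
-+/*
-+ * Numerically: offset = realtime - (monotonic[0] + monotonic[1]) / 2,
-+ * where the midpoint of the two monotonic samples approximates the
-+ * monotonic time at which getnstimeofday() was read, so the result is
-+ * roughly the realtime clock value (in nanoseconds) corresponding to
-+ * trace clock time 0.
-+ */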
-+
-+/*
-+ * Output metadata into this session's metadata buffers.
-+ */
-+static
-+int _lttng_session_metadata_statedump(struct lttng_session *session)
-+{
-+ unsigned char *uuid_c = session->uuid.b;
-+ unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
-+ struct lttng_channel *chan;
-+ struct lttng_event *event;
-+ int ret = 0;
-+
-+ if (!ACCESS_ONCE(session->active))
-+ return 0;
-+ if (session->metadata_dumped)
-+ goto skip_session;
-+ if (!session->metadata) {
-+		printk(KERN_WARNING "LTTng: attempt to start tracing, but metadata channel is not found. Operation aborted.\n");
-+ return -EPERM;
-+ }
-+
-+ snprintf(uuid_s, sizeof(uuid_s),
-+ "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
-+ uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
-+ uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
-+ uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
-+ uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
-+
-+ ret = lttng_metadata_printf(session,
-+ "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
-+ "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
-+ "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
-+ "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
-+ "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
-+ "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
-+ "\n"
-+ "trace {\n"
-+ " major = %u;\n"
-+ " minor = %u;\n"
-+ " uuid = \"%s\";\n"
-+ " byte_order = %s;\n"
-+ " packet.header := struct {\n"
-+ " uint32_t magic;\n"
-+ " uint8_t uuid[16];\n"
-+ " uint32_t stream_id;\n"
-+ " };\n"
-+ "};\n\n",
-+ lttng_alignof(uint8_t) * CHAR_BIT,
-+ lttng_alignof(uint16_t) * CHAR_BIT,
-+ lttng_alignof(uint32_t) * CHAR_BIT,
-+ lttng_alignof(uint64_t) * CHAR_BIT,
-+ CTF_SPEC_MAJOR,
-+ CTF_SPEC_MINOR,
-+ uuid_s,
-+#ifdef __BIG_ENDIAN
-+ "be"
-+#else
-+ "le"
-+#endif
-+ );
-+ if (ret)
-+ goto end;
-+
-+ ret = lttng_metadata_printf(session,
-+ "env {\n"
-+ " domain = \"kernel\";\n"
-+ " sysname = \"%s\";\n"
-+ " kernel_release = \"%s\";\n"
-+ " kernel_version = \"%s\";\n"
-+ " tracer_name = \"lttng-modules\";\n"
-+ " tracer_major = %d;\n"
-+ " tracer_minor = %d;\n"
-+ " tracer_patchlevel = %d;\n"
-+ "};\n\n",
-+ utsname()->sysname,
-+ utsname()->release,
-+ utsname()->version,
-+ LTTNG_MODULES_MAJOR_VERSION,
-+ LTTNG_MODULES_MINOR_VERSION,
-+ LTTNG_MODULES_PATCHLEVEL_VERSION
-+ );
-+ if (ret)
-+ goto end;
-+
-+ ret = lttng_metadata_printf(session,
-+ "clock {\n"
-+ " name = %s;\n",
-+ "monotonic"
-+ );
-+ if (ret)
-+ goto end;
-+
-+ if (!trace_clock_uuid(clock_uuid_s)) {
-+ ret = lttng_metadata_printf(session,
-+ " uuid = \"%s\";\n",
-+ clock_uuid_s
-+ );
-+ if (ret)
-+ goto end;
-+ }
-+
-+ ret = lttng_metadata_printf(session,
-+ " description = \"Monotonic Clock\";\n"
-+ " freq = %llu; /* Frequency, in Hz */\n"
-+ " /* clock value offset from Epoch is: offset * (1/freq) */\n"
-+ " offset = %llu;\n"
-+ "};\n\n",
-+ (unsigned long long) trace_clock_freq(),
-+ (unsigned long long) measure_clock_offset()
-+ );
-+ if (ret)
-+ goto end;
-+
-+ ret = lttng_metadata_printf(session,
-+ "typealias integer {\n"
-+ " size = 27; align = 1; signed = false;\n"
-+ " map = clock.monotonic.value;\n"
-+ "} := uint27_clock_monotonic_t;\n"
-+ "\n"
-+ "typealias integer {\n"
-+ " size = 32; align = %u; signed = false;\n"
-+ " map = clock.monotonic.value;\n"
-+ "} := uint32_clock_monotonic_t;\n"
-+ "\n"
-+ "typealias integer {\n"
-+ " size = 64; align = %u; signed = false;\n"
-+ " map = clock.monotonic.value;\n"
-+ "} := uint64_clock_monotonic_t;\n\n",
-+ lttng_alignof(uint32_t) * CHAR_BIT,
-+ lttng_alignof(uint64_t) * CHAR_BIT
-+ );
-+ if (ret)
-+ goto end;
-+
-+ ret = _lttng_stream_packet_context_declare(session);
-+ if (ret)
-+ goto end;
-+
-+ ret = _lttng_event_header_declare(session);
-+ if (ret)
-+ goto end;
-+
-+skip_session:
-+ list_for_each_entry(chan, &session->chan, list) {
-+ ret = _lttng_channel_metadata_statedump(session, chan);
-+ if (ret)
-+ goto end;
-+ }
-+
-+ list_for_each_entry(event, &session->events, list) {
-+ ret = _lttng_event_metadata_statedump(session, event->chan, event);
-+ if (ret)
-+ goto end;
-+ }
-+ session->metadata_dumped = 1;
-+end:
-+ return ret;
-+}
-+
-+/**
-+ * lttng_transport_register - LTT transport registration
-+ * @transport: transport structure
-+ *
-+ * Registers a transport which can be used as output to extract the data out of
-+ * LTTng. The module calling this registration function must ensure that no
-+ * trap-inducing code will be executed by the transport functions. E.g.
-+ * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
-+ * is made visible to the transport function. This registration acts as a
-+ * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
-+ * after its registration must it synchronize the TLBs.
-+ */
-+void lttng_transport_register(struct lttng_transport *transport)
-+{
-+ /*
-+ * Make sure no page fault can be triggered by the module about to be
-+ * registered. We deal with this here so we don't have to call
-+ * vmalloc_sync_all() in each module's init.
-+ */
-+ wrapper_vmalloc_sync_all();
-+
-+ mutex_lock(&sessions_mutex);
-+ list_add_tail(&transport->node, &lttng_transport_list);
-+ mutex_unlock(&sessions_mutex);
-+}
-+EXPORT_SYMBOL_GPL(lttng_transport_register);
-+
-+/**
-+ * lttng_transport_unregister - LTT transport unregistration
-+ * @transport: transport structure
-+ */
-+void lttng_transport_unregister(struct lttng_transport *transport)
-+{
-+ mutex_lock(&sessions_mutex);
-+ list_del(&transport->node);
-+ mutex_unlock(&sessions_mutex);
-+}
-+EXPORT_SYMBOL_GPL(lttng_transport_unregister);
-+
-+static int __init lttng_events_init(void)
-+{
-+ int ret;
-+
-+ event_cache = KMEM_CACHE(lttng_event, 0);
-+ if (!event_cache)
-+ return -ENOMEM;
-+ ret = lttng_abi_init();
-+ if (ret)
-+ goto error_abi;
-+ return 0;
-+error_abi:
-+ kmem_cache_destroy(event_cache);
-+ return ret;
-+}
-+
-+module_init(lttng_events_init);
-+
-+static void __exit lttng_events_exit(void)
-+{
-+ struct lttng_session *session, *tmpsession;
-+
-+ lttng_abi_exit();
-+ list_for_each_entry_safe(session, tmpsession, &sessions, list)
-+ lttng_session_destroy(session);
-+ kmem_cache_destroy(event_cache);
-+}
-+
-+module_exit(lttng_events_exit);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng Events");
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-events.h
-@@ -0,0 +1,466 @@
-+#ifndef _LTTNG_EVENTS_H
-+#define _LTTNG_EVENTS_H
-+
-+/*
-+ * lttng-events.h
-+ *
-+ * Holds LTTng per-session event registry.
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/list.h>
-+#include <linux/kprobes.h>
-+#include "wrapper/uuid.h"
-+#include "lttng-abi.h"
-+
-+#undef is_signed_type
-+#define is_signed_type(type) (((type)(-1)) < 0)
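-+/* For example, is_signed_type(int) evaluates to 1 and is_signed_type(size_t) to 0. */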
-+
-+struct lttng_channel;
-+struct lttng_session;
-+struct lib_ring_buffer_ctx;
-+struct perf_event;
-+struct perf_event_attr;
-+
-+/* Type description */
-+
-+/* Update the astract_types name table in lttng-types.c along with this enum */
-+enum abstract_types {
-+ atype_integer,
-+ atype_enum,
-+ atype_array,
-+ atype_sequence,
-+ atype_string,
-+ NR_ABSTRACT_TYPES,
-+};
-+
-+/* Update the string_encodings name table in lttng-types.c along with this enum */
-+enum lttng_string_encodings {
-+ lttng_encode_none = 0,
-+ lttng_encode_UTF8 = 1,
-+ lttng_encode_ASCII = 2,
-+ NR_STRING_ENCODINGS,
-+};
-+
-+struct lttng_enum_entry {
-+ unsigned long long start, end; /* start and end are inclusive */
-+ const char *string;
-+};
-+
-+#define __type_integer(_type, _byte_order, _base, _encoding) \
-+ { \
-+ .atype = atype_integer, \
-+ .u.basic.integer = \
-+ { \
-+ .size = sizeof(_type) * CHAR_BIT, \
-+ .alignment = lttng_alignof(_type) * CHAR_BIT, \
-+ .signedness = is_signed_type(_type), \
-+ .reverse_byte_order = _byte_order != __BYTE_ORDER, \
-+ .base = _base, \
-+ .encoding = lttng_encode_##_encoding, \
-+ }, \
-+ } \
-+
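-+/*
-+ * For example (hypothetical field, for illustration only), a decimal,
-+ * native-endian 32-bit payload field could be described as:
-+ *
-+ *	static const struct lttng_event_field my_fields[] = {
-+ *		[0] = {
-+ *			.name = "count",
-+ *			.type = __type_integer(uint32_t, __BYTE_ORDER, 10, none),
-+ *		},
-+ *	};
-+ */
-+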
-+struct lttng_integer_type {
-+ unsigned int size; /* in bits */
-+ unsigned short alignment; /* in bits */
-+ unsigned int signedness:1,
-+ reverse_byte_order:1;
-+ unsigned int base; /* 2, 8, 10, 16, for pretty print */
-+ enum lttng_string_encodings encoding;
-+};
-+
-+union _lttng_basic_type {
-+ struct lttng_integer_type integer;
-+ struct {
-+ const char *name;
-+ } enumeration;
-+ struct {
-+ enum lttng_string_encodings encoding;
-+ } string;
-+};
-+
-+struct lttng_basic_type {
-+ enum abstract_types atype;
-+ union {
-+ union _lttng_basic_type basic;
-+ } u;
-+};
-+
-+struct lttng_type {
-+ enum abstract_types atype;
-+ union {
-+ union _lttng_basic_type basic;
-+ struct {
-+ struct lttng_basic_type elem_type;
-+ unsigned int length; /* num. elems. */
-+ } array;
-+ struct {
-+ struct lttng_basic_type length_type;
-+ struct lttng_basic_type elem_type;
-+ } sequence;
-+ } u;
-+};
-+
-+struct lttng_enum {
-+ const char *name;
-+ struct lttng_type container_type;
-+ const struct lttng_enum_entry *entries;
-+ unsigned int len;
-+};
-+
-+/* Event field description */
-+
-+struct lttng_event_field {
-+ const char *name;
-+ struct lttng_type type;
-+};
-+
-+/*
-+ * We need to keep this perf counter field separately from struct
-+ * lttng_ctx_field because cpu hotplug needs fixed-location addresses.
-+ */
-+struct lttng_perf_counter_field {
-+ struct notifier_block nb;
-+ int hp_enable;
-+ struct perf_event_attr *attr;
-+ struct perf_event **e; /* per-cpu array */
-+};
-+
-+struct lttng_ctx_field {
-+ struct lttng_event_field event_field;
-+ size_t (*get_size)(size_t offset);
-+ void (*record)(struct lttng_ctx_field *field,
-+ struct lib_ring_buffer_ctx *ctx,
-+ struct lttng_channel *chan);
-+ union {
-+ struct lttng_perf_counter_field *perf_counter;
-+ } u;
-+ void (*destroy)(struct lttng_ctx_field *field);
-+};
-+
-+struct lttng_ctx {
-+ struct lttng_ctx_field *fields;
-+ unsigned int nr_fields;
-+ unsigned int allocated_fields;
-+};
-+
-+struct lttng_event_desc {
-+ const char *name;
-+ void *probe_callback;
-+ const struct lttng_event_ctx *ctx; /* context */
-+ const struct lttng_event_field *fields; /* event payload */
-+ unsigned int nr_fields;
-+ struct module *owner;
-+};
-+
-+struct lttng_probe_desc {
-+ const struct lttng_event_desc **event_desc;
-+ unsigned int nr_events;
-+ struct list_head head; /* chain registered probes */
-+};
-+
-+struct lttng_krp; /* Kretprobe handling */
-+
-+/*
-+ * lttng_event structure is referred to by the tracing fast path. It must be
-+ * kept small.
-+ */
-+struct lttng_event {
-+ unsigned int id;
-+ struct lttng_channel *chan;
-+ int enabled;
-+ const struct lttng_event_desc *desc;
-+ void *filter;
-+ struct lttng_ctx *ctx;
-+ enum lttng_kernel_instrumentation instrumentation;
-+ union {
-+ struct {
-+ struct kprobe kp;
-+ char *symbol_name;
-+ } kprobe;
-+ struct {
-+ struct lttng_krp *lttng_krp;
-+ char *symbol_name;
-+ } kretprobe;
-+ struct {
-+ char *symbol_name;
-+ } ftrace;
-+ } u;
-+ struct list_head list; /* Event list */
-+ unsigned int metadata_dumped:1;
-+};
-+
-+struct lttng_channel_ops {
-+ struct channel *(*channel_create)(const char *name,
-+ struct lttng_channel *lttng_chan,
-+ void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval);
-+ void (*channel_destroy)(struct channel *chan);
-+ struct lib_ring_buffer *(*buffer_read_open)(struct channel *chan);
-+ int (*buffer_has_read_closed_stream)(struct channel *chan);
-+ void (*buffer_read_close)(struct lib_ring_buffer *buf);
-+ int (*event_reserve)(struct lib_ring_buffer_ctx *ctx,
-+ uint32_t event_id);
-+ void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
-+ void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
-+ size_t len);
-+ void (*event_write_from_user)(struct lib_ring_buffer_ctx *ctx,
-+ const void *src, size_t len);
-+ void (*event_memset)(struct lib_ring_buffer_ctx *ctx,
-+ int c, size_t len);
-+ /*
-+ * packet_avail_size returns the available size in the current
-+ * packet. Note that the size returned is only a hint, since it
-+ * may change due to concurrent writes.
-+ */
-+ size_t (*packet_avail_size)(struct channel *chan);
-+ wait_queue_head_t *(*get_writer_buf_wait_queue)(struct channel *chan, int cpu);
-+ wait_queue_head_t *(*get_hp_wait_queue)(struct channel *chan);
-+ int (*is_finalized)(struct channel *chan);
-+ int (*is_disabled)(struct channel *chan);
-+};
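-+
-+/*
-+ * A concrete implementation of these operations is provided by the ring
-+ * buffer clients; see lttng_relay_transport in lttng-ring-buffer-client.h
-+ * later in this patch.
-+ */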
-+
-+struct lttng_transport {
-+ char *name;
-+ struct module *owner;
-+ struct list_head node;
-+ struct lttng_channel_ops ops;
-+};
-+
-+struct lttng_channel {
-+ unsigned int id;
-+ struct channel *chan; /* Channel buffers */
-+ int enabled;
-+ struct lttng_ctx *ctx;
-+ /* Event ID management */
-+ struct lttng_session *session;
-+ struct file *file; /* File associated to channel */
-+ unsigned int free_event_id; /* Next event ID to allocate */
-+ struct list_head list; /* Channel list */
-+ struct lttng_channel_ops *ops;
-+ struct lttng_transport *transport;
-+ struct lttng_event **sc_table; /* for syscall tracing */
-+ struct lttng_event **compat_sc_table;
-+ struct lttng_event *sc_unknown; /* for unknown syscalls */
-+ struct lttng_event *sc_compat_unknown;
-+ struct lttng_event *sc_exit; /* for syscall exit */
-+ int header_type; /* 0: unset, 1: compact, 2: large */
-+ unsigned int metadata_dumped:1;
-+};
-+
-+struct lttng_session {
-+ int active; /* Is trace session active ? */
-+ int been_active; /* Has trace session been active ? */
-+ struct file *file; /* File associated to session */
-+ struct lttng_channel *metadata; /* Metadata channel */
-+ struct list_head chan; /* Channel list head */
-+ struct list_head events; /* Event list head */
-+ struct list_head list; /* Session list */
-+ unsigned int free_chan_id; /* Next chan ID to allocate */
-+ uuid_le uuid; /* Trace session unique ID */
-+ unsigned int metadata_dumped:1;
-+};
-+
-+struct lttng_session *lttng_session_create(void);
-+int lttng_session_enable(struct lttng_session *session);
-+int lttng_session_disable(struct lttng_session *session);
-+void lttng_session_destroy(struct lttng_session *session);
-+
-+struct lttng_channel *lttng_channel_create(struct lttng_session *session,
-+ const char *transport_name,
-+ void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval);
-+struct lttng_channel *lttng_global_channel_create(struct lttng_session *session,
-+ int overwrite, void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval);
-+
-+struct lttng_event *lttng_event_create(struct lttng_channel *chan,
-+ struct lttng_kernel_event *event_param,
-+ void *filter,
-+ const struct lttng_event_desc *internal_desc);
-+
-+int lttng_channel_enable(struct lttng_channel *channel);
-+int lttng_channel_disable(struct lttng_channel *channel);
-+int lttng_event_enable(struct lttng_event *event);
-+int lttng_event_disable(struct lttng_event *event);
-+
-+void lttng_transport_register(struct lttng_transport *transport);
-+void lttng_transport_unregister(struct lttng_transport *transport);
-+
-+void synchronize_trace(void);
-+int lttng_abi_init(void);
-+void lttng_abi_exit(void);
-+
-+int lttng_probe_register(struct lttng_probe_desc *desc);
-+void lttng_probe_unregister(struct lttng_probe_desc *desc);
-+const struct lttng_event_desc *lttng_event_get(const char *name);
-+void lttng_event_put(const struct lttng_event_desc *desc);
-+int lttng_probes_init(void);
-+void lttng_probes_exit(void);
-+
-+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
-+int lttng_syscalls_register(struct lttng_channel *chan, void *filter);
-+int lttng_syscalls_unregister(struct lttng_channel *chan);
-+#else
-+static inline int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
-+{
-+ return -ENOSYS;
-+}
-+
-+static inline int lttng_syscalls_unregister(struct lttng_channel *chan)
-+{
-+ return 0;
-+}
-+#endif
-+
-+struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx);
-+int lttng_find_context(struct lttng_ctx *ctx, const char *name);
-+void lttng_remove_context_field(struct lttng_ctx **ctx,
-+ struct lttng_ctx_field *field);
-+void lttng_destroy_context(struct lttng_ctx *ctx);
-+int lttng_add_pid_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_procname_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_prio_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_nice_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_tid_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx);
-+int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx);
-+#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
-+int lttng_add_perf_counter_to_ctx(uint32_t type,
-+ uint64_t config,
-+ const char *name,
-+ struct lttng_ctx **ctx);
-+#else
-+static inline
-+int lttng_add_perf_counter_to_ctx(uint32_t type,
-+ uint64_t config,
-+ const char *name,
-+ struct lttng_ctx **ctx)
-+{
-+ return -ENOSYS;
-+}
-+#endif
-+
-+extern int lttng_statedump_start(struct lttng_session *session);
-+
-+#ifdef CONFIG_KPROBES
-+int lttng_kprobes_register(const char *name,
-+ const char *symbol_name,
-+ uint64_t offset,
-+ uint64_t addr,
-+ struct lttng_event *event);
-+void lttng_kprobes_unregister(struct lttng_event *event);
-+void lttng_kprobes_destroy_private(struct lttng_event *event);
-+#else
-+static inline
-+int lttng_kprobes_register(const char *name,
-+ const char *symbol_name,
-+ uint64_t offset,
-+ uint64_t addr,
-+ struct lttng_event *event)
-+{
-+ return -ENOSYS;
-+}
-+
-+static inline
-+void lttng_kprobes_unregister(struct lttng_event *event)
-+{
-+}
-+
-+static inline
-+void lttng_kprobes_destroy_private(struct lttng_event *event)
-+{
-+}
-+#endif
-+
-+#ifdef CONFIG_KRETPROBES
-+int lttng_kretprobes_register(const char *name,
-+ const char *symbol_name,
-+ uint64_t offset,
-+ uint64_t addr,
-+ struct lttng_event *event_entry,
-+ struct lttng_event *event_exit);
-+void lttng_kretprobes_unregister(struct lttng_event *event);
-+void lttng_kretprobes_destroy_private(struct lttng_event *event);
-+#else
-+static inline
-+int lttng_kretprobes_register(const char *name,
-+ const char *symbol_name,
-+ uint64_t offset,
-+ uint64_t addr,
-+ struct lttng_event *event_entry,
-+ struct lttng_event *event_exit)
-+{
-+ return -ENOSYS;
-+}
-+
-+static inline
-+void lttng_kretprobes_unregister(struct lttng_event *event)
-+{
-+}
-+
-+static inline
-+void lttng_kretprobes_destroy_private(struct lttng_event *event)
-+{
-+}
-+#endif
-+
-+#ifdef CONFIG_DYNAMIC_FTRACE
-+int lttng_ftrace_register(const char *name,
-+ const char *symbol_name,
-+ struct lttng_event *event);
-+void lttng_ftrace_unregister(struct lttng_event *event);
-+void lttng_ftrace_destroy_private(struct lttng_event *event);
-+#else
-+static inline
-+int lttng_ftrace_register(const char *name,
-+ const char *symbol_name,
-+ struct lttng_event *event)
-+{
-+ return -ENOSYS;
-+}
-+
-+static inline
-+void lttng_ftrace_unregister(struct lttng_event *event)
-+{
-+}
-+
-+static inline
-+void lttng_ftrace_destroy_private(struct lttng_event *event)
-+{
-+}
-+#endif
-+
-+int lttng_calibrate(struct lttng_kernel_calibrate *calibrate);
-+
-+extern const struct file_operations lttng_tracepoint_list_fops;
-+
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
-+#define TRACEPOINT_HAS_DATA_ARG
-+#endif
-+
-+#endif /* _LTTNG_EVENTS_H */
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-probes.c
-@@ -0,0 +1,176 @@
-+/*
-+ * lttng-probes.c
-+ *
-+ * Holds LTTng probes registry.
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/list.h>
-+#include <linux/mutex.h>
-+#include <linux/seq_file.h>
-+
-+#include "lttng-events.h"
-+
-+static LIST_HEAD(probe_list);
-+static DEFINE_MUTEX(probe_mutex);
-+
-+static
-+const struct lttng_event_desc *find_event(const char *name)
-+{
-+ struct lttng_probe_desc *probe_desc;
-+ int i;
-+
-+ list_for_each_entry(probe_desc, &probe_list, head) {
-+ for (i = 0; i < probe_desc->nr_events; i++) {
-+ if (!strcmp(probe_desc->event_desc[i]->name, name))
-+ return probe_desc->event_desc[i];
-+ }
-+ }
-+ return NULL;
-+}
-+
-+int lttng_probe_register(struct lttng_probe_desc *desc)
-+{
-+ int ret = 0;
-+ int i;
-+
-+ mutex_lock(&probe_mutex);
-+ /*
-+ * TODO: This is O(N^2). Turn into a hash table when probe registration
-+ * overhead becomes an issue.
-+ */
-+ for (i = 0; i < desc->nr_events; i++) {
-+ if (find_event(desc->event_desc[i]->name)) {
-+ ret = -EEXIST;
-+ goto end;
-+ }
-+ }
-+ list_add(&desc->head, &probe_list);
-+end:
-+ mutex_unlock(&probe_mutex);
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(lttng_probe_register);
-+
-+void lttng_probe_unregister(struct lttng_probe_desc *desc)
-+{
-+ mutex_lock(&probe_mutex);
-+ list_del(&desc->head);
-+ mutex_unlock(&probe_mutex);
-+}
-+EXPORT_SYMBOL_GPL(lttng_probe_unregister);
-+
-+const struct lttng_event_desc *lttng_event_get(const char *name)
-+{
-+ const struct lttng_event_desc *event;
-+ int ret;
-+
-+ mutex_lock(&probe_mutex);
-+ event = find_event(name);
-+ mutex_unlock(&probe_mutex);
-+ if (!event)
-+ return NULL;
-+ ret = try_module_get(event->owner);
-+ WARN_ON_ONCE(!ret);
-+ return event;
-+}
-+EXPORT_SYMBOL_GPL(lttng_event_get);
-+
-+void lttng_event_put(const struct lttng_event_desc *event)
-+{
-+ module_put(event->owner);
-+}
-+EXPORT_SYMBOL_GPL(lttng_event_put);
-+
-+static
-+void *tp_list_start(struct seq_file *m, loff_t *pos)
-+{
-+ struct lttng_probe_desc *probe_desc;
-+ int iter = 0, i;
-+
-+ mutex_lock(&probe_mutex);
-+ list_for_each_entry(probe_desc, &probe_list, head) {
-+ for (i = 0; i < probe_desc->nr_events; i++) {
-+ if (iter++ >= *pos)
-+ return (void *) probe_desc->event_desc[i];
-+ }
-+ }
-+ /* End of list */
-+ return NULL;
-+}
-+
-+static
-+void *tp_list_next(struct seq_file *m, void *p, loff_t *ppos)
-+{
-+ struct lttng_probe_desc *probe_desc;
-+ int iter = 0, i;
-+
-+ (*ppos)++;
-+ list_for_each_entry(probe_desc, &probe_list, head) {
-+ for (i = 0; i < probe_desc->nr_events; i++) {
-+ if (iter++ >= *ppos)
-+ return (void *) probe_desc->event_desc[i];
-+ }
-+ }
-+ /* End of list */
-+ return NULL;
-+}
-+
-+static
-+void tp_list_stop(struct seq_file *m, void *p)
-+{
-+ mutex_unlock(&probe_mutex);
-+}
-+
-+static
-+int tp_list_show(struct seq_file *m, void *p)
-+{
-+ const struct lttng_event_desc *probe_desc = p;
-+
-+ /*
-+ * Don't export lttng internal event: lttng_metadata.
-+ */
-+ if (!strcmp(probe_desc->name, "lttng_metadata"))
-+ return 0;
-+ seq_printf(m, "event { name = %s; };\n",
-+ probe_desc->name);
-+ return 0;
-+}
-+
-+static
-+const struct seq_operations lttng_tracepoint_list_seq_ops = {
-+ .start = tp_list_start,
-+ .next = tp_list_next,
-+ .stop = tp_list_stop,
-+ .show = tp_list_show,
-+};
-+
-+static
-+int lttng_tracepoint_list_open(struct inode *inode, struct file *file)
-+{
-+ return seq_open(file, &lttng_tracepoint_list_seq_ops);
-+}
-+
-+const struct file_operations lttng_tracepoint_list_fops = {
-+ .owner = THIS_MODULE,
-+ .open = lttng_tracepoint_list_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release,
-+};
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-ring-buffer-client-discard.c
-@@ -0,0 +1,33 @@
-+/*
-+ * lttng-ring-buffer-client-discard.c
-+ *
-+ * LTTng lib ring buffer client (discard mode).
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include "lttng-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "discard"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
-+#include "lttng-ring-buffer-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-ring-buffer-client-mmap-discard.c
-@@ -0,0 +1,33 @@
-+/*
-+ * lttng-ring-buffer-client-discard.c
-+ *
-+ * LTTng lib ring buffer client (discard mode).
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include "lttng-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "discard-mmap"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
-+#include "lttng-ring-buffer-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-ring-buffer-client-mmap-overwrite.c
-@@ -0,0 +1,33 @@
-+/*
-+ * lttng-ring-buffer-client-mmap-overwrite.c
-+ *
-+ * LTTng lib ring buffer client (overwrite mode, mmap output).
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include "lttng-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite-mmap"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
-+#include "lttng-ring-buffer-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-ring-buffer-client-overwrite.c
-@@ -0,0 +1,33 @@
-+/*
-+ * lttng-ring-buffer-client-overwrite.c
-+ *
-+ * LTTng lib ring buffer client (overwrite mode).
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include "lttng-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
-+#include "lttng-ring-buffer-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-ring-buffer-client.h
-@@ -0,0 +1,598 @@
-+/*
-+ * lttng-ring-buffer-client.h
-+ *
-+ * LTTng lib ring buffer client template.
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/types.h>
-+#include "lib/bitfield.h"
-+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "wrapper/trace-clock.h"
-+#include "lttng-events.h"
-+#include "lttng-tracer.h"
-+#include "wrapper/ringbuffer/frontend_types.h"
-+
-+#define LTTNG_COMPACT_EVENT_BITS 5
-+#define LTTNG_COMPACT_TSC_BITS 27
-+
-+/*
-+ * Keep the natural field alignment for _each field_ within this structure if
-+ * you ever add/remove a field from this header. Packed attribute is not used
-+ * because gcc generates poor code on at least powerpc and mips. Don't ever
-+ * let gcc add padding between the structure elements.
-+ *
-+ * The guarantee we have with timestamps is that all the events in a
-+ * packet are included (inclusive) within the begin/end timestamps of
-+ * the packet. Another guarantee we have is that the "timestamp begin",
-+ * as well as the event timestamps, are monotonically increasing (never
-+ * decrease) when moving forward in a stream (physically). But this
-+ * guarantee does not apply to "timestamp end", because it is sampled at
-+ * commit time, which is not ordered with respect to space reservation.
-+ */
-+
-+struct packet_header {
-+ /* Trace packet header */
-+ uint32_t magic; /*
-+ * Trace magic number.
-+ * contains endianness information.
-+ */
-+ uint8_t uuid[16];
-+ uint32_t stream_id;
-+
-+ struct {
-+ /* Stream packet context */
-+ uint64_t timestamp_begin; /* Cycle count at subbuffer start */
-+ uint64_t timestamp_end; /* Cycle count at subbuffer end */
-+ uint32_t events_discarded; /*
-+ * Events lost in this subbuffer since
-+ * the beginning of the trace.
-+ * (may overflow)
-+ */
-+ uint32_t content_size; /* Size of data in subbuffer */
-+		uint32_t packet_size;		/* Subbuffer size (includes padding) */
-+ uint32_t cpu_id; /* CPU id associated with stream */
-+ uint8_t header_end; /* End of header */
-+ } ctx;
-+};
-+
-+
-+static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
-+{
-+ return trace_clock_read64();
-+}
-+
-+static inline
-+size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
-+{
-+ int i;
-+ size_t orig_offset = offset;
-+
-+ if (likely(!ctx))
-+ return 0;
-+ for (i = 0; i < ctx->nr_fields; i++)
-+ offset += ctx->fields[i].get_size(offset);
-+ return offset - orig_offset;
-+}
-+
-+static inline
-+void ctx_record(struct lib_ring_buffer_ctx *bufctx,
-+ struct lttng_channel *chan,
-+ struct lttng_ctx *ctx)
-+{
-+ int i;
-+
-+ if (likely(!ctx))
-+ return;
-+ for (i = 0; i < ctx->nr_fields; i++)
-+ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
-+}
-+
-+/*
-+ * record_header_size - Calculate the header size and padding necessary.
-+ * @config: ring buffer instance configuration
-+ * @chan: channel
-+ * @offset: offset in the write buffer
-+ * @pre_header_padding: padding to add before the header (output)
-+ * @ctx: reservation context
-+ *
-+ * Returns the event header size (including padding).
-+ *
-+ * The payload must itself determine its own alignment from the biggest type it
-+ * contains.
-+ */
-+static __inline__
-+unsigned char record_header_size(const struct lib_ring_buffer_config *config,
-+ struct channel *chan, size_t offset,
-+ size_t *pre_header_padding,
-+ struct lib_ring_buffer_ctx *ctx)
-+{
-+ struct lttng_channel *lttng_chan = channel_get_private(chan);
-+ struct lttng_event *event = ctx->priv;
-+ size_t orig_offset = offset;
-+ size_t padding;
-+
-+ switch (lttng_chan->header_type) {
-+ case 1: /* compact */
-+ padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
-+ offset += padding;
-+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-+ offset += sizeof(uint32_t); /* id and timestamp */
-+ } else {
-+ /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
-+ offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
-+ /* Align extended struct on largest member */
-+ offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
-+ offset += sizeof(uint32_t); /* id */
-+ offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
-+ offset += sizeof(uint64_t); /* timestamp */
-+ }
-+ break;
-+ case 2: /* large */
-+ padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
-+ offset += padding;
-+ offset += sizeof(uint16_t);
-+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-+ offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
-+ offset += sizeof(uint32_t); /* timestamp */
-+ } else {
-+ /* Align extended struct on largest member */
-+ offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
-+ offset += sizeof(uint32_t); /* id */
-+ offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
-+ offset += sizeof(uint64_t); /* timestamp */
-+ }
-+ break;
-+ default:
-+ padding = 0;
-+ WARN_ON_ONCE(1);
-+ }
-+ offset += ctx_get_size(offset, event->ctx);
-+ offset += ctx_get_size(offset, lttng_chan->ctx);
-+
-+ *pre_header_padding = padding;
-+ return offset - orig_offset;
-+}
-+
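-+/*
-+ * For instance, in the common compact case (neither RING_BUFFER_RFLAG_FULL_TSC
-+ * nor LTTNG_RFLAG_EXTENDED set), starting from a 32-bit aligned offset and
-+ * with no context fields attached, the header is a single uint32_t packing
-+ * the 5-bit event id with the 27-bit timestamp: record_header_size()
-+ * returns 4 and *pre_header_padding is 0.
-+ */
-+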
-+#include "wrapper/ringbuffer/api.h"
-+
-+static
-+void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx,
-+ uint32_t event_id);
-+
-+/*
-+ * lttng_write_event_header
-+ *
-+ * Writes the event header to the offset (already aligned on 32-bits).
-+ *
-+ * @config: ring buffer instance configuration
-+ * @ctx: reservation context
-+ * @event_id: event ID
-+ */
-+static __inline__
-+void lttng_write_event_header(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx,
-+ uint32_t event_id)
-+{
-+ struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
-+ struct lttng_event *event = ctx->priv;
-+
-+ if (unlikely(ctx->rflags))
-+ goto slow_path;
-+
-+ switch (lttng_chan->header_type) {
-+ case 1: /* compact */
-+ {
-+ uint32_t id_time = 0;
-+
-+ bt_bitfield_write(&id_time, uint32_t,
-+ 0,
-+ LTTNG_COMPACT_EVENT_BITS,
-+ event_id);
-+ bt_bitfield_write(&id_time, uint32_t,
-+ LTTNG_COMPACT_EVENT_BITS,
-+ LTTNG_COMPACT_TSC_BITS,
-+ ctx->tsc);
-+ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
-+ break;
-+ }
-+ case 2: /* large */
-+ {
-+ uint32_t timestamp = (uint32_t) ctx->tsc;
-+ uint16_t id = event_id;
-+
-+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
-+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-+ break;
-+ }
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+
-+ ctx_record(ctx, lttng_chan, lttng_chan->ctx);
-+ ctx_record(ctx, lttng_chan, event->ctx);
-+ lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
-+
-+ return;
-+
-+slow_path:
-+ lttng_write_event_header_slow(config, ctx, event_id);
-+}
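-+
-+/*
-+ * For reference, the compact id_time word written above packs the
-+ * LTTNG_COMPACT_EVENT_BITS event id together with the low
-+ * LTTNG_COMPACT_TSC_BITS bits of ctx->tsc; on a little-endian build,
-+ * event id 3 with tsc 0x1234 ends up as 0x00024683 (3 | (0x1234 << 5)).
-+ */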
-+
-+static
-+void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
-+ struct lib_ring_buffer_ctx *ctx,
-+ uint32_t event_id)
-+{
-+ struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
-+ struct lttng_event *event = ctx->priv;
-+
-+ switch (lttng_chan->header_type) {
-+ case 1: /* compact */
-+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-+ uint32_t id_time = 0;
-+
-+ bt_bitfield_write(&id_time, uint32_t,
-+ 0,
-+ LTTNG_COMPACT_EVENT_BITS,
-+ event_id);
-+ bt_bitfield_write(&id_time, uint32_t,
-+ LTTNG_COMPACT_EVENT_BITS,
-+ LTTNG_COMPACT_TSC_BITS, ctx->tsc);
-+ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
-+ } else {
-+ uint8_t id = 0;
-+ uint64_t timestamp = ctx->tsc;
-+
-+ bt_bitfield_write(&id, uint8_t,
-+ 0,
-+ LTTNG_COMPACT_EVENT_BITS,
-+ 31);
-+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-+ /* Align extended struct on largest member */
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
-+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
-+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-+ }
-+ break;
-+ case 2: /* large */
-+ {
-+ if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-+ uint32_t timestamp = (uint32_t) ctx->tsc;
-+ uint16_t id = event_id;
-+
-+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
-+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-+ } else {
-+ uint16_t id = 65535;
-+ uint64_t timestamp = ctx->tsc;
-+
-+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
-+ /* Align extended struct on largest member */
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
-+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
-+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
-+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
-+ }
-+ break;
-+ }
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+ ctx_record(ctx, lttng_chan, lttng_chan->ctx);
-+ ctx_record(ctx, lttng_chan, event->ctx);
-+ lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
-+}
-+
-+static const struct lib_ring_buffer_config client_config;
-+
-+static u64 client_ring_buffer_clock_read(struct channel *chan)
-+{
-+ return lib_ring_buffer_clock_read(chan);
-+}
-+
-+static
-+size_t client_record_header_size(const struct lib_ring_buffer_config *config,
-+ struct channel *chan, size_t offset,
-+ size_t *pre_header_padding,
-+ struct lib_ring_buffer_ctx *ctx)
-+{
-+ return record_header_size(config, chan, offset,
-+ pre_header_padding, ctx);
-+}
-+
-+/**
-+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
-+ *
-+ * Return header size without padding after the structure. Don't use packed
-+ * structure because gcc generates inefficient code on some architectures
-+ * (powerpc, mips..)
-+ */
-+static size_t client_packet_header_size(void)
-+{
-+ return offsetof(struct packet_header, ctx.header_end);
-+}
-+
-+static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
-+ unsigned int subbuf_idx)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ struct packet_header *header =
-+ (struct packet_header *)
-+ lib_ring_buffer_offset_address(&buf->backend,
-+ subbuf_idx * chan->backend.subbuf_size);
-+ struct lttng_channel *lttng_chan = channel_get_private(chan);
-+ struct lttng_session *session = lttng_chan->session;
-+
-+ header->magic = CTF_MAGIC_NUMBER;
-+ memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
-+ header->stream_id = lttng_chan->id;
-+ header->ctx.timestamp_begin = tsc;
-+ header->ctx.timestamp_end = 0;
-+ header->ctx.events_discarded = 0;
-+ header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
-+ header->ctx.packet_size = 0xFFFFFFFF;
-+ header->ctx.cpu_id = buf->backend.cpu;
-+}
-+
-+/*
-+ * offset is assumed to never be 0 here : never deliver a completely empty
-+ * subbuffer. data_size is between 1 and subbuf_size.
-+ */
-+static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
-+ unsigned int subbuf_idx, unsigned long data_size)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ struct packet_header *header =
-+ (struct packet_header *)
-+ lib_ring_buffer_offset_address(&buf->backend,
-+ subbuf_idx * chan->backend.subbuf_size);
-+ unsigned long records_lost = 0;
-+
-+ header->ctx.timestamp_end = tsc;
-+ header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
-+ header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
-+ records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
-+ records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
-+ records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
-+ header->ctx.events_discarded = records_lost;
-+}
-+
-+static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
-+ int cpu, const char *name)
-+{
-+ return 0;
-+}
-+
-+static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
-+{
-+}
-+
-+static const struct lib_ring_buffer_config client_config = {
-+ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
-+ .cb.record_header_size = client_record_header_size,
-+ .cb.subbuffer_header_size = client_packet_header_size,
-+ .cb.buffer_begin = client_buffer_begin,
-+ .cb.buffer_end = client_buffer_end,
-+ .cb.buffer_create = client_buffer_create,
-+ .cb.buffer_finalize = client_buffer_finalize,
-+
-+ .tsc_bits = LTTNG_COMPACT_TSC_BITS,
-+ .alloc = RING_BUFFER_ALLOC_PER_CPU,
-+ .sync = RING_BUFFER_SYNC_PER_CPU,
-+ .mode = RING_BUFFER_MODE_TEMPLATE,
-+ .backend = RING_BUFFER_PAGE,
-+ .output = RING_BUFFER_OUTPUT_TEMPLATE,
-+ .oops = RING_BUFFER_OOPS_CONSISTENCY,
-+ .ipi = RING_BUFFER_IPI_BARRIER,
-+ .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
-+};
-+
-+static
-+struct channel *_channel_create(const char *name,
-+ struct lttng_channel *lttng_chan, void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval)
-+{
-+ return channel_create(&client_config, name, lttng_chan, buf_addr,
-+ subbuf_size, num_subbuf, switch_timer_interval,
-+ read_timer_interval);
-+}
-+
-+static
-+void lttng_channel_destroy(struct channel *chan)
-+{
-+ channel_destroy(chan);
-+}
-+
-+static
-+struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
-+{
-+ struct lib_ring_buffer *buf;
-+ int cpu;
-+
-+ for_each_channel_cpu(cpu, chan) {
-+ buf = channel_get_ring_buffer(&client_config, chan, cpu);
-+ if (!lib_ring_buffer_open_read(buf))
-+ return buf;
-+ }
-+ return NULL;
-+}
-+
-+static
-+int lttng_buffer_has_read_closed_stream(struct channel *chan)
-+{
-+ struct lib_ring_buffer *buf;
-+ int cpu;
-+
-+ for_each_channel_cpu(cpu, chan) {
-+ buf = channel_get_ring_buffer(&client_config, chan, cpu);
-+ if (!atomic_long_read(&buf->active_readers))
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static
-+void lttng_buffer_read_close(struct lib_ring_buffer *buf)
-+{
-+ lib_ring_buffer_release_read(buf);
-+}
-+
-+static
-+int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
-+ uint32_t event_id)
-+{
-+ struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
-+ int ret, cpu;
-+
-+ cpu = lib_ring_buffer_get_cpu(&client_config);
-+ if (cpu < 0)
-+ return -EPERM;
-+ ctx->cpu = cpu;
-+
-+ switch (lttng_chan->header_type) {
-+ case 1: /* compact */
-+ if (event_id > 30)
-+ ctx->rflags |= LTTNG_RFLAG_EXTENDED;
-+ break;
-+ case 2: /* large */
-+ if (event_id > 65534)
-+ ctx->rflags |= LTTNG_RFLAG_EXTENDED;
-+ break;
-+ default:
-+ WARN_ON_ONCE(1);
-+ }
-+
-+ ret = lib_ring_buffer_reserve(&client_config, ctx);
-+ if (ret)
-+ goto put;
-+ lttng_write_event_header(&client_config, ctx, event_id);
-+ return 0;
-+put:
-+ lib_ring_buffer_put_cpu(&client_config);
-+ return ret;
-+}
-+
-+static
-+void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
-+{
-+ lib_ring_buffer_commit(&client_config, ctx);
-+ lib_ring_buffer_put_cpu(&client_config);
-+}
-+
-+static
-+void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
-+ size_t len)
-+{
-+ lib_ring_buffer_write(&client_config, ctx, src, len);
-+}
-+
-+static
-+void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
-+ const void __user *src, size_t len)
-+{
-+ lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
-+}
-+
-+static
-+void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
-+ int c, size_t len)
-+{
-+ lib_ring_buffer_memset(&client_config, ctx, c, len);
-+}
-+
-+static
-+wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
-+{
-+ struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
-+ chan, cpu);
-+ return &buf->write_wait;
-+}
-+
-+static
-+wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
-+{
-+ return &chan->hp_wait;
-+}
-+
-+static
-+int lttng_is_finalized(struct channel *chan)
-+{
-+ return lib_ring_buffer_channel_is_finalized(chan);
-+}
-+
-+static
-+int lttng_is_disabled(struct channel *chan)
-+{
-+ return lib_ring_buffer_channel_is_disabled(chan);
-+}
-+
-+static struct lttng_transport lttng_relay_transport = {
-+ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
-+ .owner = THIS_MODULE,
-+ .ops = {
-+ .channel_create = _channel_create,
-+ .channel_destroy = lttng_channel_destroy,
-+ .buffer_read_open = lttng_buffer_read_open,
-+ .buffer_has_read_closed_stream =
-+ lttng_buffer_has_read_closed_stream,
-+ .buffer_read_close = lttng_buffer_read_close,
-+ .event_reserve = lttng_event_reserve,
-+ .event_commit = lttng_event_commit,
-+ .event_write = lttng_event_write,
-+ .event_write_from_user = lttng_event_write_from_user,
-+ .event_memset = lttng_event_memset,
-+ .packet_avail_size = NULL, /* Would be racy anyway */
-+ .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
-+ .get_hp_wait_queue = lttng_get_hp_wait_queue,
-+ .is_finalized = lttng_is_finalized,
-+ .is_disabled = lttng_is_disabled,
-+ },
-+};
-+
-+static int __init lttng_ring_buffer_client_init(void)
-+{
-+ /*
-+ * This vmalloc sync all also takes care of the lib ring buffer
-+ * vmalloc'd module pages when it is built as a module into LTTng.
-+ */
-+ wrapper_vmalloc_sync_all();
-+ lttng_transport_register(&lttng_relay_transport);
-+ return 0;
-+}
-+
-+module_init(lttng_ring_buffer_client_init);
-+
-+static void __exit lttng_ring_buffer_client_exit(void)
-+{
-+ lttng_transport_unregister(&lttng_relay_transport);
-+}
-+
-+module_exit(lttng_ring_buffer_client_exit);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
-+ " client");
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-ring-buffer-metadata-client.c
-@@ -0,0 +1,33 @@
-+/*
-+ * lttng-ring-buffer-metadata-client.c
-+ *
-+ * LTTng lib ring buffer metadata client.
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include "lttng-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
-+#include "lttng-ring-buffer-metadata-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-ring-buffer-metadata-client.h
-@@ -0,0 +1,342 @@
-+/*
-+ * lttng-ring-buffer-metadata-client.h
-+ *
-+ * LTTng lib ring buffer client template.
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/types.h>
-+#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-+#include "lttng-events.h"
-+#include "lttng-tracer.h"
-+
-+struct metadata_packet_header {
-+ uint32_t magic; /* 0x75D11D57 */
-+ uint8_t uuid[16]; /* Universally Unique Identifier */
-+ uint32_t checksum; /* 0 if unused */
-+ uint32_t content_size; /* in bits */
-+ uint32_t packet_size; /* in bits */
-+ uint8_t compression_scheme; /* 0 if unused */
-+ uint8_t encryption_scheme; /* 0 if unused */
-+ uint8_t checksum_scheme; /* 0 if unused */
-+ uint8_t major; /* CTF spec major version number */
-+ uint8_t minor; /* CTF spec minor version number */
-+ uint8_t header_end[0];
-+};
-+
-+struct metadata_record_header {
-+ uint8_t header_end[0]; /* End of header */
-+};
-+
-+static const struct lib_ring_buffer_config client_config;
-+
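-+/*
-+ * The metadata channel timestamps nothing and uses no per-record header:
-+ * the callbacks below therefore always return 0 (matching tsc_bits = 0 in
-+ * the client_config further down).
-+ */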
-+static inline
-+u64 lib_ring_buffer_clock_read(struct channel *chan)
-+{
-+ return 0;
-+}
-+
-+static inline
-+unsigned char record_header_size(const struct lib_ring_buffer_config *config,
-+ struct channel *chan, size_t offset,
-+ size_t *pre_header_padding,
-+ struct lib_ring_buffer_ctx *ctx)
-+{
-+ return 0;
-+}
-+
-+#include "wrapper/ringbuffer/api.h"
-+
-+static u64 client_ring_buffer_clock_read(struct channel *chan)
-+{
-+ return 0;
-+}
-+
-+static
-+size_t client_record_header_size(const struct lib_ring_buffer_config *config,
-+ struct channel *chan, size_t offset,
-+ size_t *pre_header_padding,
-+ struct lib_ring_buffer_ctx *ctx)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
-+ *
-+ * Return header size without padding after the structure. Don't use packed
-+ * structure because gcc generates inefficient code on some architectures
-+ * (powerpc, mips..)
-+ */
-+static size_t client_packet_header_size(void)
-+{
-+ return offsetof(struct metadata_packet_header, header_end);
-+}
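
As an illustration (a minimal stand-alone sketch; the struct and names below are invented, mirroring metadata_packet_header above), offsetof() on the header_end marker excludes the trailing padding that sizeof() would include:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Unpacked on purpose, mirroring the layout of metadata_packet_header. */
struct pkt_header_demo {
	uint32_t magic;
	uint8_t  uuid[16];
	uint32_t checksum;
	uint32_t content_size;
	uint32_t packet_size;
	uint8_t  compression_scheme;
	uint8_t  encryption_scheme;
	uint8_t  checksum_scheme;
	uint8_t  major;
	uint8_t  minor;
	uint8_t  header_end[0];	/* end-of-header marker (GNU zero-length array) */
};

int main(void)
{
	/* Typically 37 bytes of real header vs. 40 once padded to 4-byte alignment. */
	printf("offsetof(header_end) = %zu\n",
	       offsetof(struct pkt_header_demo, header_end));
	printf("sizeof(struct)       = %zu\n", sizeof(struct pkt_header_demo));
	return 0;
}
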
-+
-+static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
-+ unsigned int subbuf_idx)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ struct metadata_packet_header *header =
-+ (struct metadata_packet_header *)
-+ lib_ring_buffer_offset_address(&buf->backend,
-+ subbuf_idx * chan->backend.subbuf_size);
-+ struct lttng_channel *lttng_chan = channel_get_private(chan);
-+ struct lttng_session *session = lttng_chan->session;
-+
-+ header->magic = TSDL_MAGIC_NUMBER;
-+ memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
-+ header->checksum = 0; /* 0 if unused */
-+ header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
-+ header->packet_size = 0xFFFFFFFF; /* in bits, for debugging */
-+ header->compression_scheme = 0; /* 0 if unused */
-+ header->encryption_scheme = 0; /* 0 if unused */
-+ header->checksum_scheme = 0; /* 0 if unused */
-+ header->major = CTF_SPEC_MAJOR;
-+ header->minor = CTF_SPEC_MINOR;
-+}
-+
-+/*
-+ * offset is assumed never to be 0 here: never deliver a completely empty
-+ * subbuffer. data_size is between 1 and subbuf_size.
-+ */
-+static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
-+ unsigned int subbuf_idx, unsigned long data_size)
-+{
-+ struct channel *chan = buf->backend.chan;
-+ struct metadata_packet_header *header =
-+ (struct metadata_packet_header *)
-+ lib_ring_buffer_offset_address(&buf->backend,
-+ subbuf_idx * chan->backend.subbuf_size);
-+ unsigned long records_lost = 0;
-+
-+ header->content_size = data_size * CHAR_BIT; /* in bits */
-+ header->packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
-+ /*
-+ * We do not care about the records lost count, because the metadata
-+ * channel waits and retries.
-+ */
-+ (void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
-+ records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
-+ records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
-+ WARN_ON_ONCE(records_lost != 0);
-+}
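
A minimal sketch of the size bookkeeping above, assuming a 4096-byte page size (the DEMO_* macros are invented for the example): both values are expressed in bits, content_size covering only the bytes actually written and packet_size the page-aligned sub-buffer footprint.

#include <stdio.h>

#define DEMO_CHAR_BIT   8
#define DEMO_PAGE_SIZE  4096UL
#define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

int main(void)
{
	unsigned long data_size = 100;	/* bytes of metadata in the sub-buffer */

	/* Mirrors client_buffer_end(): both values are in bits. */
	unsigned long content_size = data_size * DEMO_CHAR_BIT;
	unsigned long packet_size  = DEMO_PAGE_ALIGN(data_size) * DEMO_CHAR_BIT;

	printf("content_size = %lu bits\n", content_size);	/* 800 */
	printf("packet_size  = %lu bits\n", packet_size);	/* 32768 */
	return 0;
}
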
-+
-+static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
-+ int cpu, const char *name)
-+{
-+ return 0;
-+}
-+
-+static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
-+{
-+}
-+
-+static const struct lib_ring_buffer_config client_config = {
-+ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
-+ .cb.record_header_size = client_record_header_size,
-+ .cb.subbuffer_header_size = client_packet_header_size,
-+ .cb.buffer_begin = client_buffer_begin,
-+ .cb.buffer_end = client_buffer_end,
-+ .cb.buffer_create = client_buffer_create,
-+ .cb.buffer_finalize = client_buffer_finalize,
-+
-+ .tsc_bits = 0,
-+ .alloc = RING_BUFFER_ALLOC_GLOBAL,
-+ .sync = RING_BUFFER_SYNC_GLOBAL,
-+ .mode = RING_BUFFER_MODE_TEMPLATE,
-+ .backend = RING_BUFFER_PAGE,
-+ .output = RING_BUFFER_OUTPUT_TEMPLATE,
-+ .oops = RING_BUFFER_OOPS_CONSISTENCY,
-+ .ipi = RING_BUFFER_IPI_BARRIER,
-+ .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
-+};
-+
-+static
-+struct channel *_channel_create(const char *name,
-+ struct lttng_channel *lttng_chan, void *buf_addr,
-+ size_t subbuf_size, size_t num_subbuf,
-+ unsigned int switch_timer_interval,
-+ unsigned int read_timer_interval)
-+{
-+ return channel_create(&client_config, name, lttng_chan, buf_addr,
-+ subbuf_size, num_subbuf, switch_timer_interval,
-+ read_timer_interval);
-+}
-+
-+static
-+void lttng_channel_destroy(struct channel *chan)
-+{
-+ channel_destroy(chan);
-+}
-+
-+static
-+struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
-+{
-+ struct lib_ring_buffer *buf;
-+
-+ buf = channel_get_ring_buffer(&client_config, chan, 0);
-+ if (!lib_ring_buffer_open_read(buf))
-+ return buf;
-+ return NULL;
-+}
-+
-+static
-+int lttng_buffer_has_read_closed_stream(struct channel *chan)
-+{
-+ struct lib_ring_buffer *buf;
-+ int cpu;
-+
-+ for_each_channel_cpu(cpu, chan) {
-+ buf = channel_get_ring_buffer(&client_config, chan, cpu);
-+ if (!atomic_long_read(&buf->active_readers))
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+static
-+void lttng_buffer_read_close(struct lib_ring_buffer *buf)
-+{
-+ lib_ring_buffer_release_read(buf);
-+}
-+
-+static
-+int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
-+{
-+ return lib_ring_buffer_reserve(&client_config, ctx);
-+}
-+
-+static
-+void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
-+{
-+ lib_ring_buffer_commit(&client_config, ctx);
-+}
-+
-+static
-+void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
-+ size_t len)
-+{
-+ lib_ring_buffer_write(&client_config, ctx, src, len);
-+}
-+
-+static
-+void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
-+ const void __user *src, size_t len)
-+{
-+ lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
-+}
-+
-+static
-+void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
-+ int c, size_t len)
-+{
-+ lib_ring_buffer_memset(&client_config, ctx, c, len);
-+}
-+
-+static
-+size_t lttng_packet_avail_size(struct channel *chan)
-+{
-+ unsigned long o_begin;
-+ struct lib_ring_buffer *buf;
-+
-+ buf = chan->backend.buf; /* Only for global buffer ! */
-+ o_begin = v_read(&client_config, &buf->offset);
-+ if (subbuf_offset(o_begin, chan) != 0) {
-+ return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
-+ } else {
-+ return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
-+ - sizeof(struct metadata_packet_header);
-+ }
-+}
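
A worked example of the branch above, with sizes assumed for illustration (4096-byte sub-buffers, a 40-byte packet header standing in for sizeof(struct metadata_packet_header)): in the middle of a sub-buffer the header has already been written, while at offset 0 room for it must still be reserved.

#include <stdio.h>

#define DEMO_SUBBUF_SIZE 4096UL
#define DEMO_HDR_SIZE    40UL	/* assumed metadata packet header size */

static unsigned long demo_packet_avail_size(unsigned long write_offset)
{
	unsigned long off = write_offset % DEMO_SUBBUF_SIZE;

	if (off != 0)				/* header already present */
		return DEMO_SUBBUF_SIZE - off;
	return DEMO_SUBBUF_SIZE - DEMO_HDR_SIZE;	/* reserve header room */
}

int main(void)
{
	printf("avail at offset 0:    %lu\n", demo_packet_avail_size(0));	/* 4056 */
	printf("avail at offset 1000: %lu\n", demo_packet_avail_size(1000));	/* 3096 */
	return 0;
}
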
-+
-+static
-+wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
-+{
-+ struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
-+ chan, cpu);
-+ return &buf->write_wait;
-+}
-+
-+static
-+wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
-+{
-+ return &chan->hp_wait;
-+}
-+
-+static
-+int lttng_is_finalized(struct channel *chan)
-+{
-+ return lib_ring_buffer_channel_is_finalized(chan);
-+}
-+
-+static
-+int lttng_is_disabled(struct channel *chan)
-+{
-+ return lib_ring_buffer_channel_is_disabled(chan);
-+}
-+
-+static struct lttng_transport lttng_relay_transport = {
-+ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
-+ .owner = THIS_MODULE,
-+ .ops = {
-+ .channel_create = _channel_create,
-+ .channel_destroy = lttng_channel_destroy,
-+ .buffer_read_open = lttng_buffer_read_open,
-+ .buffer_has_read_closed_stream =
-+ lttng_buffer_has_read_closed_stream,
-+ .buffer_read_close = lttng_buffer_read_close,
-+ .event_reserve = lttng_event_reserve,
-+ .event_commit = lttng_event_commit,
-+ .event_write_from_user = lttng_event_write_from_user,
-+ .event_memset = lttng_event_memset,
-+ .event_write = lttng_event_write,
-+ .packet_avail_size = lttng_packet_avail_size,
-+ .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
-+ .get_hp_wait_queue = lttng_get_hp_wait_queue,
-+ .is_finalized = lttng_is_finalized,
-+ .is_disabled = lttng_is_disabled,
-+ },
-+};
-+
-+static int __init lttng_ring_buffer_client_init(void)
-+{
-+ /*
-+ * This vmalloc sync all also takes care of the lib ring buffer
-+ * vmalloc'd module pages when it is built as a module into LTTng.
-+ */
-+ wrapper_vmalloc_sync_all();
-+ lttng_transport_register(&lttng_relay_transport);
-+ return 0;
-+}
-+
-+module_init(lttng_ring_buffer_client_init);
-+
-+static void __exit lttng_ring_buffer_client_exit(void)
-+{
-+ lttng_transport_unregister(&lttng_relay_transport);
-+}
-+
-+module_exit(lttng_ring_buffer_client_exit);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
-+ " client");
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-ring-buffer-metadata-mmap-client.c
-@@ -0,0 +1,33 @@
-+/*
-+ * lttng-ring-buffer-metadata-mmap-client.c
-+ *
-+ * LTTng lib ring buffer metadata client.
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include "lttng-tracer.h"
-+
-+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-+#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata-mmap"
-+#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
-+#include "lttng-ring-buffer-metadata-client.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers");
-+MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-statedump-impl.c
-@@ -0,0 +1,385 @@
-+/*
-+ * lttng-statedump-impl.c
-+ *
-+ * Linux Trace Toolkit Next Generation Kernel State Dump
-+ *
-+ * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
-+ * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ *
-+ * Changes:
-+ * Eric Clement: Add listing of network IP interface
-+ * 2006, 2007 Mathieu Desnoyers Fix kernel threads
-+ * Various updates
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/netlink.h>
-+#include <linux/inet.h>
-+#include <linux/ip.h>
-+#include <linux/kthread.h>
-+#include <linux/proc_fs.h>
-+#include <linux/file.h>
-+#include <linux/interrupt.h>
-+#include <linux/irqnr.h>
-+#include <linux/cpu.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/fdtable.h>
-+#include <linux/swap.h>
-+#include <linux/wait.h>
-+#include <linux/mutex.h>
-+
-+#include "lttng-events.h"
-+#include "wrapper/irqdesc.h"
-+
-+#ifdef CONFIG_GENERIC_HARDIRQS
-+#include <linux/irq.h>
-+#endif
-+
-+/* Define the tracepoints, but do not build the probes */
-+#define CREATE_TRACE_POINTS
-+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
-+#define TRACE_INCLUDE_FILE lttng-statedump
-+#include "instrumentation/events/lttng-module/lttng-statedump.h"
-+
-+/*
-+ * Protected by the trace lock.
-+ */
-+static struct delayed_work cpu_work[NR_CPUS];
-+static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
-+static atomic_t kernel_threads_to_run;
-+
-+enum lttng_thread_type {
-+ LTTNG_USER_THREAD = 0,
-+ LTTNG_KERNEL_THREAD = 1,
-+};
-+
-+enum lttng_execution_mode {
-+ LTTNG_USER_MODE = 0,
-+ LTTNG_SYSCALL = 1,
-+ LTTNG_TRAP = 2,
-+ LTTNG_IRQ = 3,
-+ LTTNG_SOFTIRQ = 4,
-+ LTTNG_MODE_UNKNOWN = 5,
-+};
-+
-+enum lttng_execution_submode {
-+ LTTNG_NONE = 0,
-+ LTTNG_UNKNOWN = 1,
-+};
-+
-+enum lttng_process_status {
-+ LTTNG_UNNAMED = 0,
-+ LTTNG_WAIT_FORK = 1,
-+ LTTNG_WAIT_CPU = 2,
-+ LTTNG_EXIT = 3,
-+ LTTNG_ZOMBIE = 4,
-+ LTTNG_WAIT = 5,
-+ LTTNG_RUN = 6,
-+ LTTNG_DEAD = 7,
-+};
-+
-+#ifdef CONFIG_INET
-+static
-+void lttng_enumerate_device(struct lttng_session *session,
-+ struct net_device *dev)
-+{
-+ struct in_device *in_dev;
-+ struct in_ifaddr *ifa;
-+
-+ if (dev->flags & IFF_UP) {
-+ in_dev = in_dev_get(dev);
-+ if (in_dev) {
-+ for (ifa = in_dev->ifa_list; ifa != NULL;
-+ ifa = ifa->ifa_next) {
-+ trace_lttng_statedump_network_interface(
-+ session, dev, ifa);
-+ }
-+ in_dev_put(in_dev);
-+ }
-+ } else {
-+ trace_lttng_statedump_network_interface(
-+ session, dev, NULL);
-+ }
-+}
-+
-+static
-+int lttng_enumerate_network_ip_interface(struct lttng_session *session)
-+{
-+ struct net_device *dev;
-+
-+ read_lock(&dev_base_lock);
-+ for_each_netdev(&init_net, dev)
-+ lttng_enumerate_device(session, dev);
-+ read_unlock(&dev_base_lock);
-+
-+ return 0;
-+}
-+#else /* CONFIG_INET */
-+static inline
-+int lttng_enumerate_network_ip_interface(struct lttng_session *session)
-+{
-+ return 0;
-+}
-+#endif /* CONFIG_INET */
-+
-+
-+static
-+void lttng_enumerate_task_fd(struct lttng_session *session,
-+ struct task_struct *p, char *tmp)
-+{
-+ struct fdtable *fdt;
-+ struct file *filp;
-+ unsigned int i;
-+ const unsigned char *path;
-+
-+ task_lock(p);
-+ if (!p->files)
-+ goto unlock_task;
-+ spin_lock(&p->files->file_lock);
-+ fdt = files_fdtable(p->files);
-+ for (i = 0; i < fdt->max_fds; i++) {
-+ filp = fcheck_files(p->files, i);
-+ if (!filp)
-+ continue;
-+ path = d_path(&filp->f_path, tmp, PAGE_SIZE);
-+ /* Make sure we give at least some info */
-+ trace_lttng_statedump_file_descriptor(session, p, i,
-+ IS_ERR(path) ?
-+ filp->f_dentry->d_name.name :
-+ path);
-+ }
-+ spin_unlock(&p->files->file_lock);
-+unlock_task:
-+ task_unlock(p);
-+}
-+
-+static
-+int lttng_enumerate_file_descriptors(struct lttng_session *session)
-+{
-+ struct task_struct *p;
-+ char *tmp = (char *) __get_free_page(GFP_KERNEL);
-+
-+ /* Enumerate active file descriptors */
-+ rcu_read_lock();
-+ for_each_process(p)
-+ lttng_enumerate_task_fd(session, p, tmp);
-+ rcu_read_unlock();
-+ free_page((unsigned long) tmp);
-+ return 0;
-+}
-+
-+static
-+void lttng_enumerate_task_vm_maps(struct lttng_session *session,
-+ struct task_struct *p)
-+{
-+ struct mm_struct *mm;
-+ struct vm_area_struct *map;
-+ unsigned long ino;
-+
-+ /* get_task_mm does a task_lock... */
-+ mm = get_task_mm(p);
-+ if (!mm)
-+ return;
-+
-+ map = mm->mmap;
-+ if (map) {
-+ down_read(&mm->mmap_sem);
-+ while (map) {
-+ if (map->vm_file)
-+ ino = map->vm_file->f_dentry->d_inode->i_ino;
-+ else
-+ ino = 0;
-+ trace_lttng_statedump_vm_map(session, p, map, ino);
-+ map = map->vm_next;
-+ }
-+ up_read(&mm->mmap_sem);
-+ }
-+ mmput(mm);
-+}
-+
-+static
-+int lttng_enumerate_vm_maps(struct lttng_session *session)
-+{
-+ struct task_struct *p;
-+
-+ rcu_read_lock();
-+ for_each_process(p)
-+ lttng_enumerate_task_vm_maps(session, p);
-+ rcu_read_unlock();
-+ return 0;
-+}
-+
-+#ifdef CONFIG_GENERIC_HARDIRQS
-+
-+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
-+#define irq_desc_get_chip(desc) get_irq_desc_chip(desc)
-+#endif
-+
-+static
-+void lttng_list_interrupts(struct lttng_session *session)
-+{
-+ unsigned int irq;
-+ unsigned long flags = 0;
-+ struct irq_desc *desc;
-+
-+#define irq_to_desc wrapper_irq_to_desc
-+ /* needs irq_desc */
-+ for_each_irq_desc(irq, desc) {
-+ struct irqaction *action;
-+ const char *irq_chip_name =
-+ irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";
-+
-+ local_irq_save(flags);
-+ raw_spin_lock(&desc->lock);
-+ for (action = desc->action; action; action = action->next) {
-+ trace_lttng_statedump_interrupt(session,
-+ irq, irq_chip_name, action);
-+ }
-+ raw_spin_unlock(&desc->lock);
-+ local_irq_restore(flags);
-+ }
-+#undef irq_to_desc
-+}
-+#else
-+static inline
-+void lttng_list_interrupts(struct lttng_session *session)
-+{
-+}
-+#endif
-+
-+static
-+int lttng_enumerate_process_states(struct lttng_session *session)
-+{
-+ struct task_struct *g, *p;
-+
-+ rcu_read_lock();
-+ for_each_process(g) {
-+ p = g;
-+ do {
-+ enum lttng_execution_mode mode =
-+ LTTNG_MODE_UNKNOWN;
-+ enum lttng_execution_submode submode =
-+ LTTNG_UNKNOWN;
-+ enum lttng_process_status status;
-+ enum lttng_thread_type type;
-+
-+ task_lock(p);
-+ if (p->exit_state == EXIT_ZOMBIE)
-+ status = LTTNG_ZOMBIE;
-+ else if (p->exit_state == EXIT_DEAD)
-+ status = LTTNG_DEAD;
-+ else if (p->state == TASK_RUNNING) {
-+ /* Is this a forked child that has not run yet? */
-+ if (list_empty(&p->rt.run_list))
-+ status = LTTNG_WAIT_FORK;
-+ else
-+ /*
-+ * All tasks are considered as wait_cpu;
-+ * the viewer will sort out if the task
-+ * was really running at this time.
-+ */
-+ status = LTTNG_WAIT_CPU;
-+ } else if (p->state &
-+ (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
-+ /* Task is waiting for something to complete */
-+ status = LTTNG_WAIT;
-+ } else
-+ status = LTTNG_UNNAMED;
-+ submode = LTTNG_NONE;
-+
-+ /*
-+ * Verification of p->mm is to filter out kernel
-+ * threads; the viewer will further filter out if a
-+ * user-space thread was in syscall mode or not.
-+ */
-+ if (p->mm)
-+ type = LTTNG_USER_THREAD;
-+ else
-+ type = LTTNG_KERNEL_THREAD;
-+ trace_lttng_statedump_process_state(session,
-+ p, type, mode, submode, status);
-+ task_unlock(p);
-+ } while_each_thread(g, p);
-+ }
-+ rcu_read_unlock();
-+
-+ return 0;
-+}
-+
-+static
-+void lttng_statedump_work_func(struct work_struct *work)
-+{
-+ if (atomic_dec_and_test(&kernel_threads_to_run))
-+ /* If we are the last thread, wake up do_lttng_statedump */
-+ wake_up(&statedump_wq);
-+}
-+
-+static
-+int do_lttng_statedump(struct lttng_session *session)
-+{
-+ int cpu;
-+
-+ printk(KERN_DEBUG "LTT state dump thread start\n");
-+ trace_lttng_statedump_start(session);
-+ lttng_enumerate_process_states(session);
-+ lttng_enumerate_file_descriptors(session);
-+ lttng_enumerate_vm_maps(session);
-+ lttng_list_interrupts(session);
-+ lttng_enumerate_network_ip_interface(session);
-+
-+ /* TODO lttng_dump_idt_table(session); */
-+ /* TODO lttng_dump_softirq_vec(session); */
-+ /* TODO lttng_list_modules(session); */
-+ /* TODO lttng_dump_swap_files(session); */
-+
-+ /*
-+ * Fire off a work queue on each CPU. Their sole purpose in life
-+ * is to guarantee that each CPU has been in a state where it was in
-+ * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
-+ */
-+ get_online_cpus();
-+ atomic_set(&kernel_threads_to_run, num_online_cpus());
-+ for_each_online_cpu(cpu) {
-+ INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
-+ schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
-+ }
-+ /* Wait for all threads to run */
-+ __wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
-+ put_online_cpus();
-+ /* Our work is done */
-+ printk(KERN_DEBUG "LTT state dump end\n");
-+ trace_lttng_statedump_end(session);
-+ return 0;
-+}
-+
-+/*
-+ * Called with session mutex held.
-+ */
-+int lttng_statedump_start(struct lttng_session *session)
-+{
-+ printk(KERN_DEBUG "LTTng: state dump begin\n");
-+ return do_lttng_statedump(session);
-+}
-+EXPORT_SYMBOL_GPL(lttng_statedump_start);
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Jean-Hugues Deschenes");
-+MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Statedump");
---- a/drivers/staging/lttng/lttng-syscalls.c
-+++ b/drivers/staging/lttng/lttng-syscalls.c
-@@ -1,11 +1,23 @@
- /*
- * lttng-syscalls.c
- *
-- * Copyright 2010-2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * LTTng syscall probes.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
-@@ -14,13 +26,12 @@
- #include <asm/ptrace.h>
- #include <asm/syscall.h>
-
--#include "ltt-events.h"
-+#include "lttng-events.h"
-
- #ifndef CONFIG_COMPAT
--static inline int is_compat_task(void)
--{
-- return 0;
--}
-+# ifndef is_compat_task
-+# define is_compat_task() (0)
-+# endif
- #endif
-
- static
-@@ -141,7 +152,7 @@ const struct trace_syscall_entry compat_
-
- #undef CREATE_SYSCALL_TABLE
-
--static void syscall_entry_unknown(struct ltt_event *event,
-+static void syscall_entry_unknown(struct lttng_event *event,
- struct pt_regs *regs, unsigned int id)
- {
- unsigned long args[UNKNOWN_SYSCALL_NRARGS];
-@@ -155,8 +166,8 @@ static void syscall_entry_unknown(struct
-
- void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
- {
-- struct ltt_channel *chan = __data;
-- struct ltt_event *event, *unknown_event;
-+ struct lttng_channel *chan = __data;
-+ struct lttng_event *event, *unknown_event;
- const struct trace_syscall_entry *table, *entry;
- size_t table_len;
-
-@@ -275,7 +286,7 @@ void syscall_entry_probe(void *__data, s
- /* noinline to diminish caller stack size */
- static
- int fill_table(const struct trace_syscall_entry *table, size_t table_len,
-- struct ltt_event **chan_table, struct ltt_channel *chan, void *filter)
-+ struct lttng_event **chan_table, struct lttng_channel *chan, void *filter)
- {
- const struct lttng_event_desc *desc;
- unsigned int i;
-@@ -296,10 +307,10 @@ int fill_table(const struct trace_syscal
- if (chan_table[i])
- continue;
- memset(&ev, 0, sizeof(ev));
-- strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
-- ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
-+ ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
- ev.instrumentation = LTTNG_KERNEL_NOOP;
-- chan_table[i] = ltt_event_create(chan, &ev, filter,
-+ chan_table[i] = lttng_event_create(chan, &ev, filter,
- desc);
- if (!chan_table[i]) {
- /*
-@@ -314,7 +325,7 @@ int fill_table(const struct trace_syscal
- return 0;
- }
-
--int lttng_syscalls_register(struct ltt_channel *chan, void *filter)
-+int lttng_syscalls_register(struct lttng_channel *chan, void *filter)
- {
- struct lttng_kernel_event ev;
- int ret;
-@@ -323,7 +334,7 @@ int lttng_syscalls_register(struct ltt_c
-
- if (!chan->sc_table) {
- /* create syscall table mapping syscall to events */
-- chan->sc_table = kzalloc(sizeof(struct ltt_event *)
-+ chan->sc_table = kzalloc(sizeof(struct lttng_event *)
- * ARRAY_SIZE(sc_table), GFP_KERNEL);
- if (!chan->sc_table)
- return -ENOMEM;
-@@ -332,7 +343,7 @@ int lttng_syscalls_register(struct ltt_c
- #ifdef CONFIG_COMPAT
- if (!chan->compat_sc_table) {
- /* create syscall table mapping compat syscall to events */
-- chan->compat_sc_table = kzalloc(sizeof(struct ltt_event *)
-+ chan->compat_sc_table = kzalloc(sizeof(struct lttng_event *)
- * ARRAY_SIZE(compat_sc_table), GFP_KERNEL);
- if (!chan->compat_sc_table)
- return -ENOMEM;
-@@ -343,10 +354,10 @@ int lttng_syscalls_register(struct ltt_c
- &__event_desc___sys_unknown;
-
- memset(&ev, 0, sizeof(ev));
-- strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
-- ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
-+ ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
- ev.instrumentation = LTTNG_KERNEL_NOOP;
-- chan->sc_unknown = ltt_event_create(chan, &ev, filter,
-+ chan->sc_unknown = lttng_event_create(chan, &ev, filter,
- desc);
- if (!chan->sc_unknown) {
- return -EINVAL;
-@@ -358,10 +369,10 @@ int lttng_syscalls_register(struct ltt_c
- &__event_desc___compat_sys_unknown;
-
- memset(&ev, 0, sizeof(ev));
-- strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
-- ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
-+ ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
- ev.instrumentation = LTTNG_KERNEL_NOOP;
-- chan->sc_compat_unknown = ltt_event_create(chan, &ev, filter,
-+ chan->sc_compat_unknown = lttng_event_create(chan, &ev, filter,
- desc);
- if (!chan->sc_compat_unknown) {
- return -EINVAL;
-@@ -373,10 +384,10 @@ int lttng_syscalls_register(struct ltt_c
- &__event_desc___exit_syscall;
-
- memset(&ev, 0, sizeof(ev));
-- strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
-- ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
-+ strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN);
-+ ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
- ev.instrumentation = LTTNG_KERNEL_NOOP;
-- chan->sc_exit = ltt_event_create(chan, &ev, filter,
-+ chan->sc_exit = lttng_event_create(chan, &ev, filter,
- desc);
- if (!chan->sc_exit) {
- return -EINVAL;
-@@ -414,7 +425,7 @@ int lttng_syscalls_register(struct ltt_c
- /*
- * Only called at session destruction.
- */
--int lttng_syscalls_unregister(struct ltt_channel *chan)
-+int lttng_syscalls_unregister(struct lttng_channel *chan)
- {
- int ret;
-
-@@ -429,7 +440,7 @@ int lttng_syscalls_unregister(struct ltt
- (void *) syscall_entry_probe, chan);
- if (ret)
- return ret;
-- /* ltt_event destroy will be performed by ltt_session_destroy() */
-+ /* lttng_event destroy will be performed by lttng_session_destroy() */
- kfree(chan->sc_table);
- #ifdef CONFIG_COMPAT
- kfree(chan->compat_sc_table);
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-tracer-core.h
-@@ -0,0 +1,41 @@
-+#ifndef LTTNG_TRACER_CORE_H
-+#define LTTNG_TRACER_CORE_H
-+
-+/*
-+ * lttng-tracer-core.h
-+ *
-+ * This contains the core definitions for the Linux Trace Toolkit Next
-+ * Generation tracer.
-+ *
-+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/list.h>
-+#include <linux/percpu.h>
-+
-+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-+/* Align data on its natural alignment */
-+#define RING_BUFFER_ALIGN
-+#endif
-+
-+#include "wrapper/ringbuffer/config.h"
-+
-+struct lttng_session;
-+struct lttng_channel;
-+struct lttng_event;
-+
-+#endif /* LTTNG_TRACER_CORE_H */
---- /dev/null
-+++ b/drivers/staging/lttng/lttng-tracer.h
-@@ -0,0 +1,80 @@
-+#ifndef _LTTNG_TRACER_H
-+#define _LTTNG_TRACER_H
-+
-+/*
-+ * lttng-tracer.h
-+ *
-+ * This contains the definitions for the Linux Trace Toolkit Next
-+ * Generation tracer.
-+ *
-+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <stdarg.h>
-+#include <linux/types.h>
-+#include <linux/limits.h>
-+#include <linux/list.h>
-+#include <linux/cache.h>
-+#include <linux/timex.h>
-+#include <linux/wait.h>
-+#include <asm/atomic.h>
-+#include <asm/local.h>
-+
-+#include "wrapper/trace-clock.h"
-+#include "lttng-tracer-core.h"
-+#include "lttng-events.h"
-+
-+#define LTTNG_MODULES_MAJOR_VERSION 2
-+#define LTTNG_MODULES_MINOR_VERSION 0
-+#define LTTNG_MODULES_PATCHLEVEL_VERSION 1
-+
-+#define LTTNG_VERSION_NAME "Annedd'ale"
-+#define LTTNG_VERSION_DESCRIPTION \
-+ "New type of beer, 100% from Quebec, flavored with sapin beaumier needles, with a touch of hops."
-+
-+#ifndef CHAR_BIT
-+#define CHAR_BIT 8
-+#endif
-+
-+/* Number of bytes to log with a read/write event */
-+#define LTTNG_LOG_RW_SIZE 32L
-+#define LTTNG_MAX_SMALL_SIZE 0xFFFFU
-+
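-+/*
-+ * With RING_BUFFER_ALIGN defined, event fields keep their natural alignment
-+ * in the trace; otherwise lttng_alignof() degrades to 1 and fields are packed
-+ * on byte boundaries.
-+ */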
-+#ifdef RING_BUFFER_ALIGN
-+#define lttng_alignof(type) __alignof__(type)
-+#else
-+#define lttng_alignof(type) 1
-+#endif
-+
-+/* Tracer properties */
-+#define CTF_MAGIC_NUMBER 0xC1FC1FC1
-+#define TSDL_MAGIC_NUMBER 0x75D11D57
-+
-+/* CTF specification version followed */
-+#define CTF_SPEC_MAJOR 1
-+#define CTF_SPEC_MINOR 8
-+
-+/*
-+ * Number of milliseconds to retry before failing metadata writes on buffer full
-+ * condition. (10 seconds)
-+ */
-+#define LTTNG_METADATA_TIMEOUT_MSEC 10000
-+
-+#define LTTNG_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
-+#define LTTNG_RFLAG_END (LTTNG_RFLAG_EXTENDED << 1)
-+
-+#endif /* _LTTNG_TRACER_H */
---- a/drivers/staging/lttng/probes/Makefile
-+++ b/drivers/staging/lttng/probes/Makefile
-@@ -9,6 +9,10 @@ obj-m += lttng-probe-lttng.o
-
- obj-m += lttng-probe-sched.o
- obj-m += lttng-probe-irq.o
-+obj-m += lttng-probe-signal.o
-+obj-m += lttng-probe-timer.o
-+
-+obj-m += lttng-probe-statedump.o
-
- ifneq ($(CONFIG_KVM),)
- obj-m += lttng-probe-kvm.o
---- a/drivers/staging/lttng/probes/define_trace.h
-+++ b/drivers/staging/lttng/probes/define_trace.h
-@@ -2,9 +2,21 @@
- * define_trace.h
- *
- * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
-- * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- /*
---- a/drivers/staging/lttng/probes/lttng-events-reset.h
-+++ b/drivers/staging/lttng/probes/lttng-events-reset.h
-@@ -1,9 +1,21 @@
- /*
- * lttng-events-reset.h
- *
-- * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- /* Reset macros used within TRACE_EVENT to "nothing" */
---- a/drivers/staging/lttng/probes/lttng-events.h
-+++ b/drivers/staging/lttng/probes/lttng-events.h
-@@ -2,9 +2,21 @@
- * lttng-events.h
- *
- * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
-- * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/debugfs.h>
-@@ -12,8 +24,8 @@
- #include "lttng-types.h"
- #include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
- #include "../wrapper/ringbuffer/frontend_types.h"
--#include "../ltt-events.h"
--#include "../ltt-tracer-core.h"
-+#include "../lttng-events.h"
-+#include "../lttng-tracer-core.h"
-
- /*
- * Macro declarations used for all stages.
-@@ -319,19 +331,19 @@ static __used struct lttng_probe_desc TP
-
- #undef __field_full
- #define __field_full(_type, _item, _order, _base) \
-- __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
-+ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
- __event_len += sizeof(_type);
-
- #undef __array_enc_ext
- #define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
-- __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
-+ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
- __event_len += sizeof(_type) * (_length);
-
- #undef __dynamic_array_enc_ext
- #define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
-- __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(u32)); \
-+ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(u32)); \
- __event_len += sizeof(u32); \
-- __event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
-+ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
- __dynamic_len[__dynamic_len_idx] = (_length); \
- __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
- __dynamic_len_idx++;
-@@ -382,16 +394,16 @@ static inline size_t __event_get_size__#
-
- #undef __field_full
- #define __field_full(_type, _item, _order, _base) \
-- __event_align = max_t(size_t, __event_align, ltt_alignof(_type));
-+ __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
-
- #undef __array_enc_ext
- #define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
-- __event_align = max_t(size_t, __event_align, ltt_alignof(_type));
-+ __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
-
- #undef __dynamic_array_enc_ext
- #define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
-- __event_align = max_t(size_t, __event_align, ltt_alignof(u32)); \
-- __event_align = max_t(size_t, __event_align, ltt_alignof(_type));
-+ __event_align = max_t(size_t, __event_align, lttng_alignof(u32)); \
-+ __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
-
- #undef __string
- #define __string(_item, _src)
-@@ -506,7 +518,7 @@ __end_field_##_item:
- __assign_##dest: \
- { \
- __typeof__(__typemap.dest) __tmp = (src); \
-- lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__tmp)); \
-+ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp)); \
- __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
- } \
- goto __end_field_##dest;
-@@ -516,7 +528,7 @@ __assign_##dest: \
- __assign_##dest: \
- if (0) \
- (void) __typemap.dest; \
-- lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
-+ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest)); \
- __chan->ops->event_write(&__ctx, src, len); \
- goto __end_field_##dest;
-
-@@ -525,12 +537,12 @@ __assign_##dest: \
- __assign_##dest##_1: \
- { \
- u32 __tmpl = __dynamic_len[__dynamic_len_idx]; \
-- lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(u32)); \
-+ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(u32)); \
- __chan->ops->event_write(&__ctx, &__tmpl, sizeof(u32)); \
- } \
- goto __end_field_##dest##_1; \
- __assign_##dest##_2: \
-- lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
-+ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest)); \
- __chan->ops->event_write(&__ctx, src, \
- sizeof(__typemap.dest) * __get_dynamic_array_len(dest));\
- goto __end_field_##dest##_2;
-@@ -540,7 +552,7 @@ __assign_##dest##_2: \
- __assign_##dest: \
- if (0) \
- (void) __typemap.dest; \
-- lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
-+ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest)); \
- __chan->ops->event_write_from_user(&__ctx, src, len); \
- goto __end_field_##dest;
-
-@@ -555,7 +567,7 @@ __assign_##dest##_2: \
- \
- if (0) \
- (void) __typemap.dest; \
-- lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest));\
-+ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__typemap.dest));\
- __ustrlen = __get_dynamic_array_len(dest); \
- if (likely(__ustrlen > 1)) { \
- __chan->ops->event_write_from_user(&__ctx, src, \
-@@ -592,12 +604,23 @@ __assign_##dest##_2: \
- #undef TP_fast_assign
- #define TP_fast_assign(args...) args
-
-+/*
-+ * For state dump, check that "session" argument (mandatory) matches the
-+ * session this event belongs to. Ensures that we write state dump data only
-+ * into the started session, not into all sessions.
-+ */
-+#ifdef TP_SESSION_CHECK
-+#define _TP_SESSION_CHECK(session, csession) (session == csession)
-+#else /* TP_SESSION_CHECK */
-+#define _TP_SESSION_CHECK(session, csession) 1
-+#endif /* TP_SESSION_CHECK */
-+
- #undef DECLARE_EVENT_CLASS
- #define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
- static void __event_probe__##_name(void *__data, _proto) \
- { \
-- struct ltt_event *__event = __data; \
-- struct ltt_channel *__chan = __event->chan; \
-+ struct lttng_event *__event = __data; \
-+ struct lttng_channel *__chan = __event->chan; \
- struct lib_ring_buffer_ctx __ctx; \
- size_t __event_len, __event_align; \
- size_t __dynamic_len_idx = 0; \
-@@ -605,8 +628,12 @@ static void __event_probe__##_name(void
- struct __event_typemap__##_name __typemap; \
- int __ret; \
- \
-- if (0) \
-+ if (0) { \
- (void) __dynamic_len_idx; /* don't warn if unused */ \
-+ (void) __typemap; /* don't warn if unused */ \
-+ } \
-+ if (!_TP_SESSION_CHECK(session, __chan->session)) \
-+ return; \
- if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
- return; \
- if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
-@@ -632,12 +659,14 @@ static void __event_probe__##_name(void
- #define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
- static void __event_probe__##_name(void *__data) \
- { \
-- struct ltt_event *__event = __data; \
-- struct ltt_channel *__chan = __event->chan; \
-+ struct lttng_event *__event = __data; \
-+ struct lttng_channel *__chan = __event->chan; \
- struct lib_ring_buffer_ctx __ctx; \
- size_t __event_len, __event_align; \
- int __ret; \
- \
-+ if (!_TP_SESSION_CHECK(session, __chan->session)) \
-+ return; \
- if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
- return; \
- if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
-@@ -680,14 +709,14 @@ static void __event_probe__##_name(void
- static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
- {
- wrapper_vmalloc_sync_all();
-- return ltt_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
-+ return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
- }
-
- module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
-
- static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
- {
-- ltt_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
-+ lttng_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
- }
-
- module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
---- a/drivers/staging/lttng/probes/lttng-ftrace.c
-+++ b/drivers/staging/lttng/probes/lttng-ftrace.c
-@@ -1,10 +1,23 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * probes/lttng-ftrace.c
- *
- * LTTng function tracer integration module.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- /*
-@@ -20,17 +33,17 @@
- #include <linux/module.h>
- #include <linux/ftrace.h>
- #include <linux/slab.h>
--#include "../ltt-events.h"
-+#include "../lttng-events.h"
- #include "../wrapper/ringbuffer/frontend_types.h"
- #include "../wrapper/ftrace.h"
- #include "../wrapper/vmalloc.h"
--#include "../ltt-tracer.h"
-+#include "../lttng-tracer.h"
-
- static
- void lttng_ftrace_handler(unsigned long ip, unsigned long parent_ip, void **data)
- {
-- struct ltt_event *event = *data;
-- struct ltt_channel *chan = event->chan;
-+ struct lttng_event *event = *data;
-+ struct lttng_channel *chan = event->chan;
- struct lib_ring_buffer_ctx ctx;
- struct {
- unsigned long ip;
-@@ -46,13 +59,13 @@ void lttng_ftrace_handler(unsigned long
- return;
-
- lib_ring_buffer_ctx_init(&ctx, chan->chan, event,
-- sizeof(payload), ltt_alignof(payload), -1);
-+ sizeof(payload), lttng_alignof(payload), -1);
- ret = chan->ops->event_reserve(&ctx, event->id);
- if (ret < 0)
- return;
- payload.ip = ip;
- payload.parent_ip = parent_ip;
-- lib_ring_buffer_align_ctx(&ctx, ltt_alignof(payload));
-+ lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
- chan->ops->event_write(&ctx, &payload, sizeof(payload));
- chan->ops->event_commit(&ctx);
- return;
-@@ -62,7 +75,7 @@ void lttng_ftrace_handler(unsigned long
- * Create event description
- */
- static
--int lttng_create_ftrace_event(const char *name, struct ltt_event *event)
-+int lttng_create_ftrace_event(const char *name, struct lttng_event *event)
- {
- struct lttng_event_field *fields;
- struct lttng_event_desc *desc;
-@@ -86,7 +99,7 @@ int lttng_create_ftrace_event(const char
- fields[0].name = "ip";
- fields[0].type.atype = atype_integer;
- fields[0].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-- fields[0].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-+ fields[0].type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
- fields[0].type.u.basic.integer.signedness = is_signed_type(unsigned long);
- fields[0].type.u.basic.integer.reverse_byte_order = 0;
- fields[0].type.u.basic.integer.base = 16;
-@@ -95,7 +108,7 @@ int lttng_create_ftrace_event(const char
- fields[1].name = "parent_ip";
- fields[1].type.atype = atype_integer;
- fields[1].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-- fields[1].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-+ fields[1].type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
- fields[1].type.u.basic.integer.signedness = is_signed_type(unsigned long);
- fields[1].type.u.basic.integer.reverse_byte_order = 0;
- fields[1].type.u.basic.integer.base = 16;
-@@ -120,7 +133,7 @@ struct ftrace_probe_ops lttng_ftrace_ops
-
- int lttng_ftrace_register(const char *name,
- const char *symbol_name,
-- struct ltt_event *event)
-+ struct lttng_event *event)
- {
- int ret;
-
-@@ -151,14 +164,14 @@ error:
- }
- EXPORT_SYMBOL_GPL(lttng_ftrace_register);
-
--void lttng_ftrace_unregister(struct ltt_event *event)
-+void lttng_ftrace_unregister(struct lttng_event *event)
- {
- wrapper_unregister_ftrace_function_probe(event->u.ftrace.symbol_name,
- &lttng_ftrace_ops, event);
- }
- EXPORT_SYMBOL_GPL(lttng_ftrace_unregister);
-
--void lttng_ftrace_destroy_private(struct ltt_event *event)
-+void lttng_ftrace_destroy_private(struct lttng_event *event)
- {
- kfree(event->u.ftrace.symbol_name);
- kfree(event->desc->fields);
---- a/drivers/staging/lttng/probes/lttng-kprobes.c
-+++ b/drivers/staging/lttng/probes/lttng-kprobes.c
-@@ -1,26 +1,39 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * probes/lttng-kprobes.c
- *
- * LTTng kprobes integration module.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/kprobes.h>
- #include <linux/slab.h>
--#include "../ltt-events.h"
-+#include "../lttng-events.h"
- #include "../wrapper/ringbuffer/frontend_types.h"
- #include "../wrapper/vmalloc.h"
--#include "../ltt-tracer.h"
-+#include "../lttng-tracer.h"
-
- static
- int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
- {
-- struct ltt_event *event =
-- container_of(p, struct ltt_event, u.kprobe.kp);
-- struct ltt_channel *chan = event->chan;
-+ struct lttng_event *event =
-+ container_of(p, struct lttng_event, u.kprobe.kp);
-+ struct lttng_channel *chan = event->chan;
- struct lib_ring_buffer_ctx ctx;
- int ret;
- unsigned long data = (unsigned long) p->addr;
-@@ -33,11 +46,11 @@ int lttng_kprobes_handler_pre(struct kpr
- return 0;
-
- lib_ring_buffer_ctx_init(&ctx, chan->chan, event, sizeof(data),
-- ltt_alignof(data), -1);
-+ lttng_alignof(data), -1);
- ret = chan->ops->event_reserve(&ctx, event->id);
- if (ret < 0)
- return 0;
-- lib_ring_buffer_align_ctx(&ctx, ltt_alignof(data));
-+ lib_ring_buffer_align_ctx(&ctx, lttng_alignof(data));
- chan->ops->event_write(&ctx, &data, sizeof(data));
- chan->ops->event_commit(&ctx);
- return 0;
-@@ -47,7 +60,7 @@ int lttng_kprobes_handler_pre(struct kpr
- * Create event description
- */
- static
--int lttng_create_kprobe_event(const char *name, struct ltt_event *event)
-+int lttng_create_kprobe_event(const char *name, struct lttng_event *event)
- {
- struct lttng_event_field *field;
- struct lttng_event_desc *desc;
-@@ -71,7 +84,7 @@ int lttng_create_kprobe_event(const char
- field->name = "ip";
- field->type.atype = atype_integer;
- field->type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-- field->type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-+ field->type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
- field->type.u.basic.integer.signedness = is_signed_type(unsigned long);
- field->type.u.basic.integer.reverse_byte_order = 0;
- field->type.u.basic.integer.base = 16;
-@@ -92,7 +105,7 @@ int lttng_kprobes_register(const char *n
- const char *symbol_name,
- uint64_t offset,
- uint64_t addr,
-- struct ltt_event *event)
-+ struct lttng_event *event)
- {
- int ret;
-
-@@ -107,14 +120,14 @@ int lttng_kprobes_register(const char *n
- event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
- if (symbol_name) {
- event->u.kprobe.symbol_name =
-- kzalloc(LTTNG_SYM_NAME_LEN * sizeof(char),
-+ kzalloc(LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char),
- GFP_KERNEL);
- if (!event->u.kprobe.symbol_name) {
- ret = -ENOMEM;
- goto name_error;
- }
- memcpy(event->u.kprobe.symbol_name, symbol_name,
-- LTTNG_SYM_NAME_LEN * sizeof(char));
-+ LTTNG_KERNEL_SYM_NAME_LEN * sizeof(char));
- event->u.kprobe.kp.symbol_name =
- event->u.kprobe.symbol_name;
- }
-@@ -144,13 +157,13 @@ error:
- }
- EXPORT_SYMBOL_GPL(lttng_kprobes_register);
-
--void lttng_kprobes_unregister(struct ltt_event *event)
-+void lttng_kprobes_unregister(struct lttng_event *event)
- {
- unregister_kprobe(&event->u.kprobe.kp);
- }
- EXPORT_SYMBOL_GPL(lttng_kprobes_unregister);
-
--void lttng_kprobes_destroy_private(struct ltt_event *event)
-+void lttng_kprobes_destroy_private(struct lttng_event *event)
- {
- kfree(event->u.kprobe.symbol_name);
- kfree(event->desc->fields);
---- a/drivers/staging/lttng/probes/lttng-kretprobes.c
-+++ b/drivers/staging/lttng/probes/lttng-kretprobes.c
-@@ -1,20 +1,33 @@
- /*
-- * (C) Copyright 2009-2011 -
-- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * probes/lttng-kretprobes.c
- *
- * LTTng kretprobes integration module.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/kprobes.h>
- #include <linux/slab.h>
- #include <linux/kref.h>
--#include "../ltt-events.h"
-+#include "../lttng-events.h"
- #include "../wrapper/ringbuffer/frontend_types.h"
- #include "../wrapper/vmalloc.h"
--#include "../ltt-tracer.h"
-+#include "../lttng-tracer.h"
-
- enum lttng_kretprobe_type {
- EVENT_ENTRY = 0,
-@@ -23,7 +36,7 @@ enum lttng_kretprobe_type {
-
- struct lttng_krp {
- struct kretprobe krp;
-- struct ltt_event *event[2]; /* ENTRY and RETURN */
-+ struct lttng_event *event[2]; /* ENTRY and RETURN */
- struct kref kref_register;
- struct kref kref_alloc;
- };
-@@ -35,9 +48,9 @@ int _lttng_kretprobes_handler(struct kre
- {
- struct lttng_krp *lttng_krp =
- container_of(krpi->rp, struct lttng_krp, krp);
-- struct ltt_event *event =
-+ struct lttng_event *event =
- lttng_krp->event[type];
-- struct ltt_channel *chan = event->chan;
-+ struct lttng_channel *chan = event->chan;
- struct lib_ring_buffer_ctx ctx;
- int ret;
- struct {
-@@ -56,11 +69,11 @@ int _lttng_kretprobes_handler(struct kre
- payload.parent_ip = (unsigned long) krpi->ret_addr;
-
- lib_ring_buffer_ctx_init(&ctx, chan->chan, event, sizeof(payload),
-- ltt_alignof(payload), -1);
-+ lttng_alignof(payload), -1);
- ret = chan->ops->event_reserve(&ctx, event->id);
- if (ret < 0)
- return 0;
-- lib_ring_buffer_align_ctx(&ctx, ltt_alignof(payload));
-+ lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
- chan->ops->event_write(&ctx, &payload, sizeof(payload));
- chan->ops->event_commit(&ctx);
- return 0;
-@@ -84,7 +97,7 @@ int lttng_kretprobes_handler_return(stru
- * Create event description
- */
- static
--int lttng_create_kprobe_event(const char *name, struct ltt_event *event,
-+int lttng_create_kprobe_event(const char *name, struct lttng_event *event,
- enum lttng_kretprobe_type type)
- {
- struct lttng_event_field *fields;
-@@ -125,7 +138,7 @@ int lttng_create_kprobe_event(const char
- fields[0].name = "ip";
- fields[0].type.atype = atype_integer;
- fields[0].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-- fields[0].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-+ fields[0].type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
- fields[0].type.u.basic.integer.signedness = is_signed_type(unsigned long);
- fields[0].type.u.basic.integer.reverse_byte_order = 0;
- fields[0].type.u.basic.integer.base = 16;
-@@ -134,7 +147,7 @@ int lttng_create_kprobe_event(const char
- fields[1].name = "parent_ip";
- fields[1].type.atype = atype_integer;
- fields[1].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
-- fields[1].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
-+ fields[1].type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
- fields[1].type.u.basic.integer.signedness = is_signed_type(unsigned long);
- fields[1].type.u.basic.integer.reverse_byte_order = 0;
- fields[1].type.u.basic.integer.base = 16;
-@@ -156,8 +169,8 @@ int lttng_kretprobes_register(const char
- const char *symbol_name,
- uint64_t offset,
- uint64_t addr,
-- struct ltt_event *event_entry,
-- struct ltt_event *event_return)
-+ struct lttng_event *event_entry,
-+ struct lttng_event *event_return)
- {
- int ret;
- struct lttng_krp *lttng_krp;
-@@ -247,7 +260,7 @@ void _lttng_kretprobes_unregister_releas
- unregister_kretprobe(&lttng_krp->krp);
- }
-
--void lttng_kretprobes_unregister(struct ltt_event *event)
-+void lttng_kretprobes_unregister(struct lttng_event *event)
- {
- kref_put(&event->u.kretprobe.lttng_krp->kref_register,
- _lttng_kretprobes_unregister_release);
-@@ -262,7 +275,7 @@ void _lttng_kretprobes_release(struct kr
- kfree(lttng_krp->krp.kp.symbol_name);
- }
-
--void lttng_kretprobes_destroy_private(struct ltt_event *event)
-+void lttng_kretprobes_destroy_private(struct lttng_event *event)
- {
- kfree(event->desc->fields);
- kfree(event->desc->name);
---- a/drivers/staging/lttng/probes/lttng-probe-block.c
-+++ b/drivers/staging/lttng/probes/lttng-probe-block.c
-@@ -1,11 +1,23 @@
- /*
- * probes/lttng-probe-block.c
- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * LTTng block probes.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
---- a/drivers/staging/lttng/probes/lttng-probe-irq.c
-+++ b/drivers/staging/lttng/probes/lttng-probe-irq.c
-@@ -1,11 +1,23 @@
- /*
- * probes/lttng-probe-irq.c
- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * LTTng irq probes.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
---- a/drivers/staging/lttng/probes/lttng-probe-kvm.c
-+++ b/drivers/staging/lttng/probes/lttng-probe-kvm.c
-@@ -1,11 +1,23 @@
- /*
- * probes/lttng-probe-kvm.c
- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * LTTng kvm probes.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
---- a/drivers/staging/lttng/probes/lttng-probe-lttng.c
-+++ b/drivers/staging/lttng/probes/lttng-probe-lttng.c
-@@ -1,11 +1,23 @@
- /*
- * probes/lttng-probe-core.c
- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * LTTng core probes.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
---- a/drivers/staging/lttng/probes/lttng-probe-sched.c
-+++ b/drivers/staging/lttng/probes/lttng-probe-sched.c
-@@ -1,11 +1,23 @@
- /*
- * probes/lttng-probe-sched.c
- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * LTTng sched probes.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-probe-signal.c
-@@ -0,0 +1,42 @@
-+/*
-+ * probes/lttng-probe-signal.c
-+ *
-+ * LTTng signal probes.
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+
-+/*
-+ * Create the tracepoint static inlines from the kernel to validate that our
-+ * trace event macros match the kernel we run on.
-+ */
-+#include <trace/events/signal.h>
-+
-+/*
-+ * Create LTTng tracepoint probes.
-+ */
-+#define LTTNG_PACKAGE_BUILD
-+#define CREATE_TRACE_POINTS
-+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
-+
-+#include "../instrumentation/events/lttng-module/signal.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng signal probes");
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-probe-statedump.c
-@@ -0,0 +1,45 @@
-+/*
-+ * probes/lttng-probe-statedump.c
-+ *
-+ * LTTng statedump probes.
-+ *
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/netlink.h>
-+#include <linux/inet.h>
-+#include <linux/ip.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include "../lttng-events.h"
-+
-+/*
-+ * Create LTTng tracepoint probes.
-+ */
-+#define LTTNG_PACKAGE_BUILD
-+#define CREATE_TRACE_POINTS
-+#define TP_SESSION_CHECK
-+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
-+#define TRACE_INCLUDE_FILE lttng-statedump
-+
-+#include "../instrumentation/events/lttng-module/lttng-statedump.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng statedump probes");
---- /dev/null
-+++ b/drivers/staging/lttng/probes/lttng-probe-timer.c
-@@ -0,0 +1,43 @@
-+/*
-+ * probes/lttng-probe-timer.c
-+ *
-+ * LTTng timer probes.
-+ *
-+ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/module.h>
-+
-+/*
-+ * Create the tracepoint static inlines from the kernel to validate that our
-+ * trace event macros match the kernel we run on.
-+ */
-+#include <linux/sched.h>
-+#include <trace/events/timer.h>
-+
-+/*
-+ * Create LTTng tracepoint probes.
-+ */
-+#define LTTNG_PACKAGE_BUILD
-+#define CREATE_TRACE_POINTS
-+#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
-+
-+#include "../instrumentation/events/lttng-module/timer.h"
-+
-+MODULE_LICENSE("GPL and additional rights");
-+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
-+MODULE_DESCRIPTION("LTTng timer probes");
---- a/drivers/staging/lttng/probes/lttng-type-list.h
-+++ b/drivers/staging/lttng/probes/lttng-type-list.h
-@@ -1,9 +1,21 @@
- /*
- * lttng-type-list.h
- *
-- * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- /* Type list, used to create metadata */
---- a/drivers/staging/lttng/probes/lttng-types.c
-+++ b/drivers/staging/lttng/probes/lttng-types.c
-@@ -1,17 +1,29 @@
- /*
- * probes/lttng-types.c
- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * LTTng types.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/module.h>
- #include <linux/types.h>
- #include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
--#include "../ltt-events.h"
-+#include "../lttng-events.h"
- #include "lttng-types.h"
- #include <linux/hrtimer.h>
-
---- a/drivers/staging/lttng/probes/lttng-types.h
-+++ b/drivers/staging/lttng/probes/lttng-types.h
-@@ -8,18 +8,30 @@
- /*
- * probes/lttng-types.h
- *
-- * Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-- *
- * LTTng types.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/seq_file.h>
- #include "lttng.h"
--#include "../ltt-events.h"
--#include "../ltt-tracer.h"
--#include "../ltt-endian.h"
-+#include "../lttng-events.h"
-+#include "../lttng-tracer.h"
-+#include "../lttng-endian.h"
-
- #endif /* _LTTNG_PROBES_LTTNG_TYPES_H */
-
---- a/drivers/staging/lttng/probes/lttng.h
-+++ b/drivers/staging/lttng/probes/lttng.h
-@@ -4,9 +4,21 @@
- /*
- * lttng.h
- *
-- * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #undef PARAMS
---- a/drivers/staging/lttng/wrapper/ftrace.h
-+++ b/drivers/staging/lttng/wrapper/ftrace.h
-@@ -1,14 +1,28 @@
--#ifndef _LTT_WRAPPER_FTRACE_H
--#define _LTT_WRAPPER_FTRACE_H
-+#ifndef _LTTNG_WRAPPER_FTRACE_H
-+#define _LTTNG_WRAPPER_FTRACE_H
-
- /*
-- * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ * wrapper/ftrace.h
- *
- * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/ftrace.h>
-@@ -67,4 +81,4 @@ void wrapper_unregister_ftrace_function_
- }
- #endif
-
--#endif /* _LTT_WRAPPER_FTRACE_H */
-+#endif /* _LTTNG_WRAPPER_FTRACE_H */
---- a/drivers/staging/lttng/wrapper/inline_memcpy.h
-+++ b/drivers/staging/lttng/wrapper/inline_memcpy.h
-@@ -1,9 +1,21 @@
- /*
- * wrapper/inline_memcpy.h
- *
-- * Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #if !defined(__HAVE_ARCH_INLINE_MEMCPY) && !defined(inline_memcpy)
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/irqdesc.c
-@@ -0,0 +1,58 @@
-+/*
-+ * wrapper/irqdesc.c
-+ *
-+ * wrapper around irq_to_desc. Using KALLSYMS to get its address when
-+ * available, else we need to have a kernel that exports this function to GPL
-+ * modules.
-+ *
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#ifdef CONFIG_KALLSYMS
-+
-+#include <linux/kallsyms.h>
-+#include <linux/interrupt.h>
-+#include <linux/irqnr.h>
-+#include "kallsyms.h"
-+#include "irqdesc.h"
-+
-+static
-+struct irq_desc *(*irq_to_desc_sym)(unsigned int irq);
-+
-+struct irq_desc *wrapper_irq_to_desc(unsigned int irq)
-+{
-+ if (!irq_to_desc_sym)
-+ irq_to_desc_sym = (void *) kallsyms_lookup_funcptr("irq_to_desc");
-+ if (irq_to_desc_sym) {
-+ return irq_to_desc_sym(irq);
-+ } else {
-+ printk(KERN_WARNING "LTTng: irq_to_desc symbol lookup failed.\n");
-+ return NULL;
-+ }
-+}
-+
-+#else
-+
-+#include <linux/interrupt.h>
-+#include <linux/irqnr.h>
-+
-+struct irq_desc *wrapper_irq_to_desc(unsigned int irq)
-+{
-+ return irq_to_desc(irq);
-+}
-+
-+#endif
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/irqdesc.h
-@@ -0,0 +1,33 @@
-+#ifndef _LTTNG_WRAPPER_IRQDESC_H
-+#define _LTTNG_WRAPPER_IRQDESC_H
-+
-+/*
-+ * wrapper/irqdesc.h
-+ *
-+ * wrapper around irq_to_desc. Using KALLSYMS to get its address when
-+ * available, else we need to have a kernel that exports this function to GPL
-+ * modules.
-+ *
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <linux/interrupt.h>
-+#include <linux/irqnr.h>
-+
-+struct irq_desc *wrapper_irq_to_desc(unsigned int irq);
-+
-+#endif /* _LTTNG_WRAPPER_IRQDESC_H */
---- a/drivers/staging/lttng/wrapper/kallsyms.h
-+++ b/drivers/staging/lttng/wrapper/kallsyms.h
-@@ -1,18 +1,49 @@
--#ifndef _LTT_WRAPPER_KALLSYMS_H
--#define _LTT_WRAPPER_KALLSYMS_H
--
--#include <linux/kallsyms.h>
-+#ifndef _LTTNG_WRAPPER_KALLSYMS_H
-+#define _LTTNG_WRAPPER_KALLSYMS_H
-
- /*
-- * Copyright (C) 2011 Avik Sil (avik.sil@linaro.org)
-+ * wrapper/kallsyms.h
- *
- * wrapper around kallsyms_lookup_name. Implements arch-dependent code for
- * arches where the address of the start of the function body is different
- * from the pointer which can be used to call the function, e.g. ARM THUMB2.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2011 Avik Sil (avik.sil@linaro.org)
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ * Copyright (C) 2011 Avik Sil (avik.sil@linaro.org)
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-+#include <linux/kallsyms.h>
-+
- static inline
- unsigned long kallsyms_lookup_funcptr(const char *name)
- {
-@@ -27,4 +58,4 @@ unsigned long kallsyms_lookup_funcptr(co
- #endif
- return addr;
- }
--#endif /* _LTT_WRAPPER_KALLSYMS_H */
-+#endif /* _LTTNG_WRAPPER_KALLSYMS_H */
---- a/drivers/staging/lttng/wrapper/perf.h
-+++ b/drivers/staging/lttng/wrapper/perf.h
-@@ -1,10 +1,24 @@
--#ifndef _LTT_WRAPPER_PERF_H
--#define _LTT_WRAPPER_PERF_H
-+#ifndef _LTTNG_WRAPPER_PERF_H
-+#define _LTTNG_WRAPPER_PERF_H
-
- /*
-- * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ * wrapper/perf.h
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/perf_event.h>
-@@ -29,4 +43,4 @@ wrapper_perf_event_create_kernel_counter
- }
- #endif
-
--#endif /* _LTT_WRAPPER_PERF_H */
-+#endif /* _LTTNG_WRAPPER_PERF_H */
---- a/drivers/staging/lttng/wrapper/poll.h
-+++ b/drivers/staging/lttng/wrapper/poll.h
-@@ -2,12 +2,32 @@
- #define _LTTNG_WRAPPER_POLL_H
-
- /*
-- * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ * wrapper/poll.h
- *
- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
--#include <linux/poll.h>
-+ #include <linux/poll.h>
-+
-+/*
-+ * Note: poll_wait_set_exclusive() is defined as no-op. Thundering herd
-+ * effect can be noticed with large number of consumer threads.
-+ */
-
- #define poll_wait_set_exclusive(poll_table)
-
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/random.c
-@@ -0,0 +1,77 @@
-+/*
-+ * wrapper/random.c
-+ *
-+ * wrapper around bootid read. Using KALLSYMS to get its address when
-+ * available, else we need to have a kernel that exports this function to GPL
-+ * modules.
-+ *
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+/* boot_id depends on sysctl */
-+#if defined(CONFIG_SYSCTL)
-+
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/sched.h>
-+#include <linux/uaccess.h>
-+#include "random.h"
-+
-+/*
-+ * Returns string boot id.
-+ */
-+int wrapper_get_bootid(char *bootid)
-+{
-+ struct file *file;
-+ int ret;
-+ ssize_t len;
-+ mm_segment_t old_fs;
-+
-+ file = filp_open("/proc/sys/kernel/random/boot_id", O_RDONLY, 0);
-+ if (IS_ERR(file))
-+ return PTR_ERR(file);
-+
-+ old_fs = get_fs();
-+ set_fs(KERNEL_DS);
-+
-+ if (!file->f_op || !file->f_op->read) {
-+ ret = -EINVAL;
-+ goto end;
-+ }
-+
-+ len = file->f_op->read(file, bootid, BOOT_ID_LEN - 1, &file->f_pos);
-+ if (len != BOOT_ID_LEN - 1) {
-+ ret = -EINVAL;
-+ goto end;
-+ }
-+
-+ bootid[BOOT_ID_LEN - 1] = '\0';
-+ ret = 0;
-+end:
-+ set_fs(old_fs);
-+ filp_close(file, current->files);
-+ return ret;
-+}
-+
-+#else
-+
-+int wrapper_get_bootid(char *bootid)
-+{
-+ return -ENOSYS;
-+}
-+
-+#endif
---- /dev/null
-+++ b/drivers/staging/lttng/wrapper/random.h
-@@ -0,0 +1,32 @@
-+#ifndef _LTTNG_WRAPPER_RANDOM_H
-+#define _LTTNG_WRAPPER_RANDOM_H
-+
-+/*
-+ * wrapper/random.h
-+ *
-+ * wrapper around bootid read. Using KALLSYMS to get its address when
-+ * available, else we need to have a kernel that exports this function to GPL
-+ * modules.
-+ *
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#define BOOT_ID_LEN 37
-+
-+int wrapper_get_bootid(char *bootid);
-+
-+#endif /* _LTTNG_WRAPPER_RANDOM_H */
---- a/drivers/staging/lttng/wrapper/spinlock.h
-+++ b/drivers/staging/lttng/wrapper/spinlock.h
-@@ -1,10 +1,24 @@
--#ifndef _LTT_WRAPPER_SPINLOCK_H
--#define _LTT_WRAPPER_SPINLOCK_H
-+#ifndef _LTTNG_WRAPPER_SPINLOCK_H
-+#define _LTTNG_WRAPPER_SPINLOCK_H
-
- /*
-- * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ * wrapper/spinlock.h
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/version.h>
-@@ -23,4 +37,4 @@
-
-
- #endif
--#endif /* _LTT_WRAPPER_SPINLOCK_H */
-+#endif /* _LTTNG_WRAPPER_SPINLOCK_H */
---- a/drivers/staging/lttng/wrapper/splice.c
-+++ b/drivers/staging/lttng/wrapper/splice.c
-@@ -1,11 +1,25 @@
- /*
-- * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ * wrapper/splice.c
- *
-- * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
-+ * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #ifdef CONFIG_KALLSYMS
---- a/drivers/staging/lttng/wrapper/splice.h
-+++ b/drivers/staging/lttng/wrapper/splice.h
-@@ -1,14 +1,28 @@
--#ifndef _LTT_WRAPPER_SPLICE_H
--#define _LTT_WRAPPER_SPLICE_H
-+#ifndef _LTTNG_WRAPPER_SPLICE_H
-+#define _LTTNG_WRAPPER_SPLICE_H
-
- /*
-- * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ * wrapper/splice.h
- *
-- * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
-+ * wrapper around splice_to_pipe. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/splice.h>
-@@ -20,4 +34,4 @@ ssize_t wrapper_splice_to_pipe(struct pi
- #define PIPE_DEF_BUFFERS 16
- #endif
-
--#endif /* _LTT_WRAPPER_SPLICE_H */
-+#endif /* _LTTNG_WRAPPER_SPLICE_H */
---- a/drivers/staging/lttng/wrapper/trace-clock.h
-+++ b/drivers/staging/lttng/wrapper/trace-clock.h
-@@ -1,15 +1,29 @@
-+#ifndef _LTTNG_TRACE_CLOCK_H
-+#define _LTTNG_TRACE_CLOCK_H
-+
- /*
-- * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ * wrapper/trace-clock.h
- *
- * Contains LTTng trace clock mapping to LTTng trace clock or mainline monotonic
- * clock. This wrapper depends on CONFIG_HIGH_RES_TIMERS=y.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
--#ifndef _LTT_TRACE_CLOCK_H
--#define _LTT_TRACE_CLOCK_H
--
- #ifdef CONFIG_HAVE_TRACE_CLOCK
- #include <linux/trace-clock.h>
- #else /* CONFIG_HAVE_TRACE_CLOCK */
-@@ -18,6 +32,7 @@
- #include <linux/ktime.h>
- #include <linux/time.h>
- #include <linux/hrtimer.h>
-+#include "random.h"
-
- static inline u64 trace_clock_monotonic_wrapper(void)
- {
-@@ -44,18 +59,24 @@ static inline u64 trace_clock_read64(voi
- return (u64) trace_clock_monotonic_wrapper();
- }
-
--static inline u64 trace_clock_frequency(void)
-+static inline u64 trace_clock_freq(void)
- {
-- return (u64)NSEC_PER_SEC;
-+ return (u64) NSEC_PER_SEC;
- }
-
--static inline u32 trace_clock_freq_scale(void)
-+static inline int trace_clock_uuid(char *uuid)
- {
-- return 1;
-+ return wrapper_get_bootid(uuid);
- }
-
- static inline int get_trace_clock(void)
- {
-+ /*
-+ * LTTng: Using mainline kernel monotonic clock. NMIs will not be
-+ * traced, and expect significant performance degradation compared to
-+ * the LTTng trace clocks. Integration of the LTTng 0.x trace clocks
-+ * into LTTng 2.0 is planned in a near future.
-+ */
- printk(KERN_WARNING "LTTng: Using mainline kernel monotonic clock.\n");
- printk(KERN_WARNING " * NMIs will not be traced,\n");
- printk(KERN_WARNING " * expect significant performance degradation compared to the\n");
-@@ -72,4 +93,4 @@ static inline void put_trace_clock(void)
-
- #endif /* CONFIG_HAVE_TRACE_CLOCK */
-
--#endif /* _LTT_TRACE_CLOCK_H */
-+#endif /* _LTTNG_TRACE_CLOCK_H */
---- a/drivers/staging/lttng/wrapper/uuid.h
-+++ b/drivers/staging/lttng/wrapper/uuid.h
-@@ -1,10 +1,24 @@
--#ifndef _LTT_WRAPPER_UUID_H
--#define _LTT_WRAPPER_UUID_H
-+#ifndef _LTTNG_WRAPPER_UUID_H
-+#define _LTTNG_WRAPPER_UUID_H
-
- /*
-- * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ * wrapper/uuid.h
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #include <linux/version.h>
-@@ -26,4 +40,4 @@ void uuid_le_gen(uuid_le *u)
- }
-
- #endif
--#endif /* _LTT_WRAPPER_UUID_H */
-+#endif /* _LTTNG_WRAPPER_UUID_H */
---- a/drivers/staging/lttng/wrapper/vmalloc.h
-+++ b/drivers/staging/lttng/wrapper/vmalloc.h
-@@ -1,14 +1,28 @@
--#ifndef _LTT_WRAPPER_VMALLOC_H
--#define _LTT_WRAPPER_VMALLOC_H
-+#ifndef _LTTNG_WRAPPER_VMALLOC_H
-+#define _LTTNG_WRAPPER_VMALLOC_H
-
- /*
-- * Copyright (C) 2011 Mathieu Desnoyers (mathieu.desnoyers@efficios.com)
-+ * wrapper/vmalloc.h
- *
- * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules.
- *
-- * Dual LGPL v2.1/GPL v2 license.
-+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-+ *
-+ * This library is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; only
-+ * version 2.1 of the License.
-+ *
-+ * This library is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with this library; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
- #ifdef CONFIG_KALLSYMS
-@@ -46,4 +60,4 @@ void wrapper_vmalloc_sync_all(void)
- }
- #endif
-
--#endif /* _LTT_WRAPPER_VMALLOC_H */
-+#endif /* _LTTNG_WRAPPER_VMALLOC_H */
diff --git a/series b/series
index 1f498015653..af418c9e61f 100644
--- a/series
+++ b/series
@@ -22,41 +22,11 @@ patches.ltsi/ltsi-makefile-addition.patch
#############################################################################
# LTTNG
#
-# Patches came from short-lived experiment when they were added to the staging
-# tree for a week or so.
+# 2.3.4 version of LTTng, taken from the upstream:
+# git://git.lttng.org/lttng-modules.git
+# repo.
#
-patches.lttng/0000-lttng-lib-lttng-priority-heap.patch
-patches.lttng/0001-lttng-lib-ring-buffer.patch
-patches.lttng/0002-lttng-lib-portable-bitfield-read-write-header.patch
-patches.lttng/0003-lttng-BUILD_RUNTIME_BUG_ON.patch
-patches.lttng/0004-lttng-offset-alignment-header.patch
-patches.lttng/0005-lttng-libs-add-Makefile.patch
-patches.lttng/0006-lttng-wrappers.patch
-patches.lttng/0007-lttng-instrumentation-tracepoint-events.patch
-patches.lttng/0008-lttng-syscall-instrumentation.patch
-patches.lttng/0009-lttng-lib-ring-buffer-clients.patch
-patches.lttng/0010-lttng-tracer-control-and-core-structures.patch
-patches.lttng/0011-lttng-dynamically-selectable-context-information.patch
-patches.lttng/0012-lttng-timing-calibration-feature.patch
-patches.lttng/0013-lttng-debugfs-and-procfs-ABI.patch
-patches.lttng/0014-lttng-Add-documentation-and-TODO-files.patch
-patches.lttng/0015-lttng-add-system-call-instrumentation-probe.patch
-patches.lttng/0016-lttng-probe-callbacks.patch
-patches.lttng/0017-lttng-toplevel-Makefile-and-Kconfig.patch
-patches.lttng/0018-staging-add-LTTng-to-build.patch
-patches.lttng/0019-staging-Add-LTTng-entry-to-MAINTAINERS-file.patch
-patches.lttng/0069-lttng-lib-ring-buffer-remove-stale-null-pointer.patch
-patches.lttng/0070-lttng-lib-ring-buffer-remove-duplicate-null-pointer.patch
-patches.lttng/0071-lttng-lib-ring-buffer-move-null-pointer-check-to-ope.patch
-patches.lttng/0072-lttng-wrapper-add-missing-include-to-kallsyms-wrappe.patch
-patches.lttng/0073-staging-lttng-cleanup-one-bit-signed-bitfields.patch
-patches.lttng/0172-staging-lttng-Fix-recent-modifications-to-string_fro.patch
-patches.lttng/0173-staging-lttng-TODO-update-lttng-reported-to-work-fin.patch
-patches.lttng/0174-staging-lttng-Update-max-symbol-length-to-256.patch
-patches.lttng/lttng-update-to-v2.0.1.patch
-patches.lttng/lttng-update-2.0.1-to-2.0.4.patch
-patches.lttng/lttng-fix-module-name-lttng-relay.ko-lttng-tracer.ko.patch
-patches.lttng/lttng-fix-reference-to-obsolete-rt-kconfig-variable.patch
+patches.lttng/lttng-2.3.4.patch