From 1d321881afb955bba1e0a8f50d3a04e111fcb581 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Mon, 28 Mar 2011 00:46:11 +0400 Subject: perf, x86: P4 PMU - clean up the code a bit No change on the functional level, just align the table properly. Signed-off-by: Cyrill Gorcunov Cc: Lin Ming LKML-Reference: <4D8FA213.5050108@openvz.org> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_p4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index c2520e178d3..8ff882fdb1c 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -468,7 +468,7 @@ static struct p4_event_bind p4_event_bind_map[] = { .opcode = P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED), .escr_msr = { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 }, .escr_emask = - P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), + P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS), .cntr = { {12, 13, 16}, {14, 15, 17} }, }, [P4_EVENT_X87_ASSIST] = { -- cgit v1.2.3 From cd25f8bc2696664877b21d33b7994e12fa570919 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Fri, 25 Mar 2011 16:27:48 +0800 Subject: perf probe: Add fastpath to do lookup by function name v3 -> v2: - Make pubname_search_cb more generic - Add fastpath to find_probes also v2 -> v1: - Don't compare file names with cu_find_realpath(...), instead, compare them with the name returned by dwarf_decl_file(sp_die) The vmlinux file may have thousands of CUs. We can lookup function name from .debug_pubnames section to avoid the slow loop on CUs. 1. Improvement data for find_line_range ./perf stat -e cycles -r 10 -- ./perf probe -k /home/mlin/vmlinux \ -s /home/mlin/linux-2.6 \ --line csum_partial_copy_to_user > tmp.log before patch applied ===================== 847,988,276 cycles 0.355075856 seconds time elapsed after patch applied ===================== 206,102,622 cycles 0.086883555 seconds time elapsed 2. 
Improvement data for find_probes ./perf stat -e cycles -r 10 -- ./perf probe -k /home/mlin/vmlinux \ -s /home/mlin/linux-2.6 \ --vars csum_partial_copy_to_user > tmp.log before patch applied ===================== 848,490,844 cycles 0.355307901 seconds time elapsed after patch applied ===================== 205,684,469 cycles 0.086694010 seconds time elapsed Acked-by: Masami Hiramatsu Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: linux-kernel LKML-Reference: <1301041668.14111.52.camel@minggr.sh.intel.com> Signed-off-by: Lin Ming Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/probe-finder.c | 72 ++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/probe-finder.h | 2 ++ 2 files changed, 74 insertions(+) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 194f9e2a328..ff416b85f7e 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1435,6 +1435,38 @@ static int find_probe_point_by_func(struct probe_finder *pf) return _param.retval; } +struct pubname_callback_param { + char *function; + char *file; + Dwarf_Die *cu_die; + Dwarf_Die *sp_die; + int found; +}; + +static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) +{ + struct pubname_callback_param *param = data; + + if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) { + if (dwarf_tag(param->sp_die) != DW_TAG_subprogram) + return DWARF_CB_OK; + + if (die_compare_name(param->sp_die, param->function)) { + if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die)) + return DWARF_CB_OK; + + if (param->file && + strtailcmp(param->file, dwarf_decl_file(param->sp_die))) + return DWARF_CB_OK; + + param->found = 1; + return DWARF_CB_ABORT; + } + } + + return DWARF_CB_OK; +} + /* Find probe points from debuginfo */ static int find_probes(int fd, struct probe_finder *pf) { @@ -1461,6 +1493,27 @@ static int find_probes(int fd, struct probe_finder *pf) off = 0; line_list__init(&pf->lcache); + + /* Fastpath: lookup by function name from .debug_pubnames section */ + if (pp->function) { + struct pubname_callback_param pubname_param = { + .function = pp->function, + .file = pp->file, + .cu_die = &pf->cu_die, + .sp_die = &pf->sp_die, + }; + struct dwarf_callback_param probe_param = { + .data = pf, + }; + + dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); + if (pubname_param.found) { + ret = probe_point_search_cb(&pf->sp_die, &probe_param); + if (ret) + goto found; + } + } + /* Loop on CUs (Compilation Unit) */ while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { /* Get the DIE(Debugging Information Entry) of this CU */ @@ -1488,6 +1541,8 @@ static int find_probes(int fd, struct probe_finder *pf) } off = noff; } + +found: line_list__free(&pf->lcache); if (dwfl) dwfl_end(dwfl); @@ -1895,6 +1950,22 @@ int find_line_range(int fd, struct line_range *lr) return -EBADF; } + /* Fastpath: lookup by function name from .debug_pubnames section */ + if (lr->function) { + struct pubname_callback_param pubname_param = { + .function = lr->function, .file = lr->file, + .cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0}; + struct dwarf_callback_param line_range_param = { + .data = (void *)&lf, .retval = 0}; + + dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); + if (pubname_param.found) { + line_range_search_cb(&lf.sp_die, &line_range_param); + if (lf.found) + goto found; + } + } + /* Loop on CUs (Compilation Unit) */ while (!lf.found && ret >= 0) { if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0) @@ -1923,6 +1994,7 
@@ int find_line_range(int fd, struct line_range *lr) off = noff; } +found: /* Store comp_dir */ if (lf.found) { comp_dir = cu_get_comp_dir(&lf.cu_die); diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index beaefc3c122..605730a366d 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -49,6 +49,7 @@ struct probe_finder { Dwarf_Addr addr; /* Address */ const char *fname; /* Real file name */ Dwarf_Die cu_die; /* Current CU */ + Dwarf_Die sp_die; struct list_head lcache; /* Line cache for lazy match */ /* For variable searching */ @@ -83,6 +84,7 @@ struct line_finder { int lno_s; /* Start line number */ int lno_e; /* End line number */ Dwarf_Die cu_die; /* Current CU */ + Dwarf_Die sp_die; int found; }; -- cgit v1.2.3 From 2c9e45f7a287384e1382932597e41a9a567811ba Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 17 Mar 2011 10:03:21 -0600 Subject: perf script: If type not given fields apply to all event types Allow: perf script -f to be equivalent to: perf script -f trace: -f sw: -f hw: i.e., the specified fields apply to all event types if the type string is not given. The field (-f) arguments are processed in the order received. A later usage can reset a prior request. e.g., -f trace: -f comm,tid,time,sym The first -f suppresses trace events (field list is ""), but then the second invocation sets the fields to comm,tid,time,sym. In this case a warning is given to the user: "Overriding previous field request for all events." Alternatively, consider the order: -f comm,tid,time,sym -f trace: The first -f sets the fields for all events and the second -f suppresses trace events. The user is given a warning message about the override, and the result of the above is that only S/W and H/W events are displayed with the given fields. For the 'wildcard' option, if a user-selected field is invalid for an event type, a message is displayed to the user that the option is ignored for that type. For example: perf script -f comm,tid,trace 2>&1 | less 'trace' not valid for hardware events. Ignoring. 'trace' not valid for software events. Ignoring. Alternatively, if the type is given and an invalid field is specified, it is an error. For example: perf script -v -f sw:comm,tid,trace 2>&1 | less 'trace' not valid for software events. At this point usage is displayed, and perf-script exits. Finally, a user may not set fields to none for all event types. i.e., -f "" is not allowed.
Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Cc: linux-kernel@vger.kernel.org LPU-Reference: <1300377801-27246-1-git-send-email-daahern@cisco.com> Signed-off-by: David Ahern Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 171 +++++++++++++++++++++++++++++++------------- 1 file changed, 122 insertions(+), 49 deletions(-) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index ac574ea2391..6cf811acc41 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -49,23 +49,52 @@ struct output_option { }; /* default set to maintain compatibility with current format */ -static u64 output_fields[PERF_TYPE_MAX] = { - [PERF_TYPE_HARDWARE] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ - PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, - - [PERF_TYPE_SOFTWARE] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ - PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, - - [PERF_TYPE_TRACEPOINT] = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | \ - PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | \ - PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE, +static struct { + bool user_set; + u64 fields; + u64 invalid_fields; +} output[PERF_TYPE_MAX] = { + + [PERF_TYPE_HARDWARE] = { + .user_set = false, + + .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | + PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, + + .invalid_fields = PERF_OUTPUT_TRACE, + }, + + [PERF_TYPE_SOFTWARE] = { + .user_set = false, + + .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | + PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, + + .invalid_fields = PERF_OUTPUT_TRACE, + }, + + [PERF_TYPE_TRACEPOINT] = { + .user_set = false, + + .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | + PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE, + }, }; -static bool output_set_by_user; +static bool output_set_by_user(void) +{ + int j; + for (j = 0; j < PERF_TYPE_MAX; ++j) { + if (output[j].user_set) + return true; + } + return false; +} -#define PRINT_FIELD(x) (output_fields[attr->type] & PERF_OUTPUT_##x) +#define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x) static int perf_session__check_attr(struct perf_session *session, struct perf_event_attr *attr) @@ -168,7 +197,7 @@ static void process_event(union perf_event *event __unused, { struct perf_event_attr *attr = &evsel->attr; - if (output_fields[attr->type] == 0) + if (output[attr->type].fields == 0) return; if (perf_session__check_attr(session, attr) < 0) @@ -451,6 +480,7 @@ static int parse_output_fields(const struct option *opt __used, { char *tok; int i, imax = sizeof(all_output_options) / sizeof(struct output_option); + int j; int rc = 0; char *str = strdup(arg); int type = -1; @@ -458,52 +488,95 @@ static int parse_output_fields(const struct option *opt __used, if (!str) return -ENOMEM; - tok = strtok(str, ":"); - if (!tok) { - fprintf(stderr, - "Invalid field string - not prepended with type."); - return -EINVAL; - } - - /* first word should state which event type user - * is specifying the fields + /* first word can state for which event type the user is specifying + * the fields. If no type exists, the specified fields apply to all + * event types found in the file minus the invalid fields for a type. 
*/ - if (!strcmp(tok, "hw")) - type = PERF_TYPE_HARDWARE; - else if (!strcmp(tok, "sw")) - type = PERF_TYPE_SOFTWARE; - else if (!strcmp(tok, "trace")) - type = PERF_TYPE_TRACEPOINT; - else { - fprintf(stderr, "Invalid event type in field string."); - return -EINVAL; + tok = strchr(str, ':'); + if (tok) { + *tok = '\0'; + tok++; + if (!strcmp(str, "hw")) + type = PERF_TYPE_HARDWARE; + else if (!strcmp(str, "sw")) + type = PERF_TYPE_SOFTWARE; + else if (!strcmp(str, "trace")) + type = PERF_TYPE_TRACEPOINT; + else { + fprintf(stderr, "Invalid event type in field string.\n"); + return -EINVAL; + } + + if (output[type].user_set) + pr_warning("Overriding previous field request for %s events.\n", + event_type(type)); + + output[type].fields = 0; + output[type].user_set = true; + + } else { + tok = str; + if (strlen(str) == 0) { + fprintf(stderr, + "Cannot set fields to 'none' for all event types.\n"); + rc = -EINVAL; + goto out; + } + + if (output_set_by_user()) + pr_warning("Overriding previous field request for all events.\n"); + + for (j = 0; j < PERF_TYPE_MAX; ++j) { + output[j].fields = 0; + output[j].user_set = true; + } } - output_fields[type] = 0; - while (1) { - tok = strtok(NULL, ","); - if (!tok) - break; + tok = strtok(tok, ","); + while (tok) { for (i = 0; i < imax; ++i) { - if (strcmp(tok, all_output_options[i].str) == 0) { - output_fields[type] |= all_output_options[i].field; + if (strcmp(tok, all_output_options[i].str) == 0) break; - } } if (i == imax) { - fprintf(stderr, "Invalid field requested."); + fprintf(stderr, "Invalid field requested.\n"); rc = -EINVAL; - break; + goto out; } - } - if (output_fields[type] == 0) { - pr_debug("No fields requested for %s type. " - "Events will not be displayed\n", event_type(type)); + if (type == -1) { + /* add user option to all events types for + * which it is valid + */ + for (j = 0; j < PERF_TYPE_MAX; ++j) { + if (output[j].invalid_fields & all_output_options[i].field) { + pr_warning("\'%s\' not valid for %s events. Ignoring.\n", + all_output_options[i].str, event_type(j)); + } else + output[j].fields |= all_output_options[i].field; + } + } else { + if (output[type].invalid_fields & all_output_options[i].field) { + fprintf(stderr, "\'%s\' not valid for %s events.\n", + all_output_options[i].str, event_type(type)); + + rc = -EINVAL; + goto out; + } + output[type].fields |= all_output_options[i].field; + } + + tok = strtok(NULL, ","); } - output_set_by_user = true; + if (type >= 0) { + if (output[type].fields == 0) { + pr_debug("No fields requested for %s type. " + "Events will not be displayed.\n", event_type(type)); + } + } +out: free(str); return rc; } @@ -1020,7 +1093,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) struct stat perf_stat; int input; - if (output_set_by_user) { + if (output_set_by_user()) { fprintf(stderr, "custom fields not supported for generated scripts"); return -1; -- cgit v1.2.3 From 176fcc5c5f0131504a55e1e1d35389c49a9177c2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 30 Mar 2011 15:30:43 -0300 Subject: perf script: Add more documentation about the -f/--fields parameters Using the commit log for 2c9e45f. 
Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tom Zanussi LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-script.txt | 52 ++++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 66f040b3072..86c87e214b1 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -113,13 +113,61 @@ OPTIONS Do various checks like samples ordering and lost events. -f:: ---fields +--fields:: Comma separated list of fields to print. Options are: comm, tid, pid, time, cpu, event, trace, sym. Field - list must be prepended with the type, trace, sw or hw, + list can be prepended with the type, trace, sw or hw, to indicate to which event type the field list applies. e.g., -f sw:comm,tid,time,sym and -f trace:time,cpu,trace + perf script -f + + is equivalent to: + + perf script -f trace: -f sw: -f hw: + + i.e., the specified fields apply to all event types if the type string + is not given. + + The arguments are processed in the order received. A later usage can + reset a prior request. e.g.: + + -f trace: -f comm,tid,time,sym + + The first -f suppresses trace events (field list is ""), but then the + second invocation sets the fields to comm,tid,time,sym. In this case a + warning is given to the user: + + "Overriding previous field request for all events." + + Alternativey, consider the order: + + -f comm,tid,time,sym -f trace: + + The first -f sets the fields for all events and the second -f + suppresses trace events. The user is given a warning message about + the override, and the result of the above is that only S/W and H/W + events are displayed with the given fields. + + For the 'wildcard' option if a user selected field is invalid for an + event type, a message is displayed to the user that the option is + ignored for that type. For example: + + $ perf script -f comm,tid,trace + 'trace' not valid for hardware events. Ignoring. + 'trace' not valid for software events. Ignoring. + + Alternatively, if the type is given an invalid field is specified it + is an error. For example: + + perf script -v -f sw:comm,tid,trace + 'trace' not valid for software events. + + At this point usage is displayed, and perf-script exits. + + Finally, a user may not set fields to none for all event types. + i.e., -f "" is not allowed. + -k:: --vmlinux=:: vmlinux pathname -- cgit v1.2.3 From 0588fa30db44fd2d4032b36a061c87478a43fbee Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 21 Mar 2011 22:59:21 -0400 Subject: tracing: Convert trace_printk() formats for module to const char * The trace_printk() formats for modules do not show up in the debugfs/tracing/printk_formats file. Only the formats that are for trace_printk()s that are in the kernel core. To facilitate the change to add trace_printk() formats from modules into that file as well, we need to convert the structure that holds the formats from char fmt[], into const char *fmt, and allocate them separately. 
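Condensed, the structural change looks roughly like the sketch below (based on the hunk that follows; the store_fmt() helper name is invented for illustration and error handling is simplified compared to the actual patch):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct trace_bprintk_fmt {
		struct list_head list;
		const char *fmt;	/* was: char fmt[0], string stored in the tail */
	};

	static LIST_HEAD(trace_bprintk_fmt_list);

	/* sketch: allocate the list node and the format string separately */
	static const char *store_fmt(const char *src)
	{
		struct trace_bprintk_fmt *tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL);
		char *fmt = kmalloc(strlen(src) + 1, GFP_KERNEL);

		if (!tb_fmt || !fmt) {
			kfree(tb_fmt);
			kfree(fmt);
			return NULL;
		}
		strcpy(fmt, src);
		tb_fmt->fmt = fmt;
		list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list);
		return tb_fmt->fmt;
	}

Separating the string from the node is what lets a later change point fmt at memory it does not own, which the module-format export in the next patch relies on.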
Signed-off-by: Steven Rostedt --- kernel/trace/trace_printk.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 2547d8813cf..b8b268158af 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -32,7 +32,7 @@ static DEFINE_MUTEX(btrace_mutex); struct trace_bprintk_fmt { struct list_head list; - char fmt[0]; + const char *fmt; }; static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) @@ -49,6 +49,7 @@ static void hold_module_trace_bprintk_format(const char **start, const char **end) { const char **iter; + char *fmt; mutex_lock(&btrace_mutex); for (iter = start; iter < end; iter++) { @@ -58,14 +59,18 @@ void hold_module_trace_bprintk_format(const char **start, const char **end) continue; } - tb_fmt = kmalloc(offsetof(struct trace_bprintk_fmt, fmt) - + strlen(*iter) + 1, GFP_KERNEL); - if (tb_fmt) { + tb_fmt = kmalloc(sizeof(*tb_fmt), GFP_KERNEL); + if (tb_fmt) + fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL); + if (tb_fmt && fmt) { list_add_tail(&tb_fmt->list, &trace_bprintk_fmt_list); - strcpy(tb_fmt->fmt, *iter); + strcpy(fmt, *iter); + tb_fmt->fmt = fmt; *iter = tb_fmt->fmt; - } else + } else { + kfree(tb_fmt); *iter = NULL; + } } mutex_unlock(&btrace_mutex); } -- cgit v1.2.3 From 1813dc3776c22ad4b0294a6df8434b9a02c98109 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 21 Mar 2011 23:36:31 -0400 Subject: tracing: Print trace_bprintk() formats for modules too The file debugfs/tracing/printk_formats maps the addresses to the formats that are used by trace_bprintk() so that userspace tools can read the buffer and be able to decode trace_bprintk events to get the format saved when reading the ring buffer directly. This is because trace_bprintk() does not store the format into the buffer, but just the address of the format, which is hidden in the kernel memory. But currently it only exports trace_bprintk()s from the kernel core and not for modules. The modules need their formats exported as well. Signed-off-by: Steven Rostedt --- kernel/trace/trace_printk.c | 103 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 97 insertions(+), 6 deletions(-) diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index b8b268158af..dff763b7baf 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c @@ -89,6 +89,76 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self, return 0; } +/* + * The debugfs/tracing/printk_formats file maps the addresses with + * the ASCII formats that are used in the bprintk events in the + * buffer. For userspace tools to be able to decode the events from + * the buffer, they need to be able to map the address with the format. + * + * The addresses of the bprintk formats are in their own section + * __trace_printk_fmt. But for modules we copy them into a link list. + * The code to print the formats and their addresses passes around the + * address of the fmt string. If the fmt address passed into the seq + * functions is within the kernel core __trace_printk_fmt section, then + * it simply uses the next pointer in the list. + * + * When the fmt pointer is outside the kernel core __trace_printk_fmt + * section, then we need to read the link list pointers. The trick is + * we pass the address of the string to the seq function just like + * we do for the kernel core formats. 
To get back the structure that + * holds the format, we simply use containerof() and then go to the + * next format in the list. + */ +static const char ** +find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) +{ + struct trace_bprintk_fmt *mod_fmt; + + if (list_empty(&trace_bprintk_fmt_list)) + return NULL; + + /* + * v will point to the address of the fmt record from t_next + * v will be NULL from t_start. + * If this is the first pointer or called from start + * then we need to walk the list. + */ + if (!v || start_index == *pos) { + struct trace_bprintk_fmt *p; + + /* search the module list */ + list_for_each_entry(p, &trace_bprintk_fmt_list, list) { + if (start_index == *pos) + return &p->fmt; + start_index++; + } + /* pos > index */ + return NULL; + } + + /* + * v points to the address of the fmt field in the mod list + * structure that holds the module print format. + */ + mod_fmt = container_of(v, typeof(*mod_fmt), fmt); + if (mod_fmt->list.next == &trace_bprintk_fmt_list) + return NULL; + + mod_fmt = container_of(mod_fmt->list.next, typeof(*mod_fmt), list); + + return &mod_fmt->fmt; +} + +static void format_mod_start(void) +{ + mutex_lock(&btrace_mutex); +} + +static void format_mod_stop(void) +{ + mutex_unlock(&btrace_mutex); +} + #else /* !CONFIG_MODULES */ __init static int module_trace_bprintk_format_notify(struct notifier_block *self, @@ -96,6 +166,13 @@ module_trace_bprintk_format_notify(struct notifier_block *self, { return 0; } +static inline const char ** +find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) +{ + return NULL; +} +static inline void format_mod_start(void) { } +static inline void format_mod_stop(void) { } #endif /* CONFIG_MODULES */ @@ -158,20 +235,33 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) } EXPORT_SYMBOL_GPL(__ftrace_vprintk); +static const char **find_next(void *v, loff_t *pos) +{ + const char **fmt = v; + int start_index; + + if (!fmt) + fmt = __start___trace_bprintk_fmt + *pos; + + start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; + + if (*pos < start_index) + return fmt; + + return find_next_mod_format(start_index, v, fmt, pos); +} + static void * t_start(struct seq_file *m, loff_t *pos) { - const char **fmt = __start___trace_bprintk_fmt + *pos; - - if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt) - return NULL; - return fmt; + format_mod_start(); + return find_next(NULL, pos); } static void *t_next(struct seq_file *m, void * v, loff_t *pos) { (*pos)++; - return t_start(m, pos); + return find_next(v, pos); } static int t_show(struct seq_file *m, void *v) @@ -210,6 +300,7 @@ static int t_show(struct seq_file *m, void *v) static void t_stop(struct seq_file *m, void *p) { + format_mod_stop(); } static const struct seq_operations show_format_seq_ops = { -- cgit v1.2.3 From ee5e51f51be755830f57445e268ba50e88ccbdbb Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 25 Mar 2011 12:05:18 +0100 Subject: tracing: Avoid soft lockup in trace_pipe running following commands: # enable the binary option echo 1 > ./options/bin # disable context info option echo 0 > ./options/context-info # tracing only events echo 1 > ./events/enable cat trace_pipe plus forcing system to generate many tracing events, is causing lockup (in NON preemptive kernels) inside tracing_read_pipe function. The issue is also easily reproduced by running ltp stress test. 
(ftrace_stress_test.sh) The reasons are: - bin/hex/raw output functions for events are set to trace_nop_print function, which prints nothing and returns TRACE_TYPE_HANDLED value - LOST EVENT trace does not handle trace_seq overflow These reasons force the while loop in tracing_read_pipe function never to break. The attached patch fixes handling of lost event trace, and changes trace_nop_print to print minimal info, which is needed for the correct tracing_read_pipe processing. v2 changes: - omit the cond_resched changes by trace_nop_print changes - WARN changed to WARN_ONCE and added info to be able to find out the culprit v3 changes: - make more accurate patch comment Signed-off-by: Jiri Olsa LKML-Reference: <20110325110518.GC1922@jolsa.brq.redhat.com> Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 15 ++++++++++++--- kernel/trace/trace_output.c | 3 +++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9541c27c1cf..5af42f478c0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2013,9 +2013,10 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) { enum print_line_t ret; - if (iter->lost_events) - trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", - iter->cpu, iter->lost_events); + if (iter->lost_events && + !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", + iter->cpu, iter->lost_events)) + return TRACE_TYPE_PARTIAL_LINE; if (iter->trace && iter->trace->print_line) { ret = iter->trace->print_line(iter); @@ -3229,6 +3230,14 @@ waitagain: if (iter->seq.len >= cnt) break; + + /* + * Setting the full flag means we reached the trace_seq buffer + * size and we should leave by partial output condition above. + * One of the trace_seq_* functions is not used properly. + */ + WARN_ONCE(iter->seq.full, "full flag set for trace type %d", + iter->ent->type); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 456be9063c2..cf535ccedc8 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -830,6 +830,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event); enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, struct trace_event *event) { + if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type)) + return TRACE_TYPE_PARTIAL_LINE; + return TRACE_TYPE_HANDLED; } -- cgit v1.2.3 From d430d3d7e646eb1eac2bb4aa244a644312e67c76 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Wed, 16 Mar 2011 17:29:47 -0400 Subject: jump label: Introduce static_branch() interface Introduce: static __always_inline bool static_branch(struct jump_label_key *key); instead of the old JUMP_LABEL(key, label) macro. In this way, jump labels become really easy to use: Define: struct jump_label_key jump_key; Can be used as: if (static_branch(&jump_key)) do unlikely code enable/disable via: jump_label_inc(&jump_key); jump_label_dec(&jump_key); that's it! For the jump labels disabled case, the static_branch() becomes an atomic_read(), and jump_label_inc()/dec() are simply atomic_inc(), atomic_dec() operations. We show testing results for this change below. Thanks to H. Peter Anvin for suggesting the 'static_branch()' construct. Since we now require a 'struct jump_label_key *key', we can store a pointer into the jump table addresses. In this way, we can enable/disable jump labels, in basically constant time. This change allows us to completely remove the previous hashtable scheme.
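Put together, usage of the new interface looks like the self-contained sketch below (the key name, hot_path() and the debugfs-style toggle are invented for illustration; only struct jump_label_key, JUMP_LABEL_INIT, static_branch() and jump_label_inc()/jump_label_dec() come from this patch):

	#include <linux/jump_label.h>
	#include <linux/kernel.h>

	/* hypothetical key guarding a rarely enabled debug path */
	static struct jump_label_key debug_path_key = JUMP_LABEL_INIT;

	static void hot_path(void)
	{
		if (static_branch(&debug_path_key)) {
			/* body is skipped via a patched no-op until the key is enabled */
			printk(KERN_DEBUG "debug path taken\n");
		}
	}

	/* hypothetical runtime toggle, e.g. driven from a debugfs write */
	static void set_debug_path(bool enable)
	{
		if (enable)
			jump_label_inc(&debug_path_key);
		else
			jump_label_dec(&debug_path_key);
	}

With jump labels compiled in, the hot-path cost is a single nop until the key is enabled; in the fallback case static_branch() reduces to an atomic_read() of key->enabled, as described above.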
Thanks to Peter Zijlstra for this re-write. Testing: I ran a series of 'tbench 20' runs 5 times (with reboots) for 3 configurations, where tracepoints were disabled. jump label configured in avg: 815.6 jump label *not* configured in (using atomic reads) avg: 800.1 jump label *not* configured in (regular reads) avg: 803.4 Signed-off-by: Peter Zijlstra LKML-Reference: <20110316212947.GA8792@redhat.com> Signed-off-by: Jason Baron Suggested-by: H. Peter Anvin Tested-by: David Daney Acked-by: Ralf Baechle Acked-by: David S. Miller Acked-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt --- arch/mips/include/asm/jump_label.h | 22 +- arch/sparc/include/asm/jump_label.h | 25 +- arch/x86/include/asm/alternative.h | 3 +- arch/x86/include/asm/jump_label.h | 26 +- arch/x86/kernel/alternative.c | 2 +- arch/x86/kernel/module.c | 1 + include/asm-generic/vmlinux.lds.h | 14 +- include/linux/dynamic_debug.h | 2 - include/linux/jump_label.h | 89 +++--- include/linux/jump_label_ref.h | 44 --- include/linux/perf_event.h | 26 +- include/linux/tracepoint.h | 22 +- kernel/jump_label.c | 539 +++++++++++++++--------------------- kernel/perf_event.c | 4 +- kernel/tracepoint.c | 23 +- 15 files changed, 356 insertions(+), 486 deletions(-) delete mode 100644 include/linux/jump_label_ref.h diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index 7622ccf7507..1881b316ca4 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -20,16 +20,18 @@ #define WORD_INSN ".word" #endif -#define JUMP_LABEL(key, label) \ - do { \ - asm goto("1:\tnop\n\t" \ - "nop\n\t" \ - ".pushsection __jump_table, \"a\"\n\t" \ - WORD_INSN " 1b, %l[" #label "], %0\n\t" \ - ".popsection\n\t" \ - : : "i" (key) : : label); \ - } while (0) - +static __always_inline bool arch_static_branch(struct jump_label_key *key) +{ + asm goto("1:\tnop\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + WORD_INSN " 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" + : : "i" (key) : : l_yes); + return false; +l_yes: + return true; +} #endif /* __KERNEL__ */ diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h index 427d4684e0d..fc73a82366f 100644 --- a/arch/sparc/include/asm/jump_label.h +++ b/arch/sparc/include/asm/jump_label.h @@ -7,17 +7,20 @@ #define JUMP_LABEL_NOP_SIZE 4 -#define JUMP_LABEL(key, label) \ - do { \ - asm goto("1:\n\t" \ - "nop\n\t" \ - "nop\n\t" \ - ".pushsection __jump_table, \"a\"\n\t"\ - ".align 4\n\t" \ - ".word 1b, %l[" #label "], %c0\n\t" \ - ".popsection \n\t" \ - : : "i" (key) : : label);\ - } while (0) +static __always_inline bool arch_static_branch(struct jump_label_key *key) +{ + asm goto("1:\n\t" + "nop\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 4\n\t" + ".word 1b, %l[l_yes], %c0\n\t" + ".popsection \n\t" + : : "i" (key) : : l_yes); + return false; +l_yes: + return true; +} #endif /* __KERNEL__ */ diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 13009d1af99..8cdd1e24797 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -4,7 +4,6 @@ #include #include #include -#include #include /* @@ -191,7 +190,7 @@ extern void *text_poke(void *addr, const void *opcode, size_t len); extern void *text_poke_smp(void *addr, const void *opcode, size_t len); extern void text_poke_smp_batch(struct text_poke_param *params, int n); -#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) +#if defined(CONFIG_DYNAMIC_FTRACE) || 
defined(CONFIG_JUMP_LABEL) #define IDEAL_NOP_SIZE_5 5 extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; extern void arch_init_ideal_nop5(void); diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index 574dbc22893..f217cee8653 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -5,20 +5,24 @@ #include #include +#include #define JUMP_LABEL_NOP_SIZE 5 -# define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" - -# define JUMP_LABEL(key, label) \ - do { \ - asm goto("1:" \ - JUMP_LABEL_INITIAL_NOP \ - ".pushsection __jump_table, \"aw\" \n\t"\ - _ASM_PTR "1b, %l[" #label "], %c0 \n\t" \ - ".popsection \n\t" \ - : : "i" (key) : : label); \ - } while (0) +#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t" + +static __always_inline bool arch_static_branch(struct jump_label_key *key) +{ + asm goto("1:" + JUMP_LABEL_INITIAL_NOP + ".pushsection __jump_table, \"aw\" \n\t" + _ASM_PTR "1b, %l[l_yes], %c0 \n\t" + ".popsection \n\t" + : : "i" (key) : : l_yes); + return false; +l_yes: + return true; +} #endif /* __KERNEL__ */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 4a234677e21..651454b0c81 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -679,7 +679,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); } -#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_JUMP_LABEL) #ifdef CONFIG_X86_64 unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index ab23f1ad4bf..52f256f2cc8 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 32c45e5fe0a..79522166d7f 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -170,6 +170,10 @@ STRUCT_ALIGN(); \ *(__tracepoints) \ /* implement dynamic printk debug */ \ + . = ALIGN(8); \ + VMLINUX_SYMBOL(__start___jump_table) = .; \ + *(__jump_table) \ + VMLINUX_SYMBOL(__stop___jump_table) = .; \ . = ALIGN(8); \ VMLINUX_SYMBOL(__start___verbose) = .; \ *(__verbose) \ @@ -228,8 +232,6 @@ \ BUG_TABLE \ \ - JUMP_TABLE \ - \ /* PCI quirks */ \ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ @@ -589,14 +591,6 @@ #define BUG_TABLE #endif -#define JUMP_TABLE \ - . = ALIGN(8); \ - __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \ - VMLINUX_SYMBOL(__start___jump_table) = .; \ - *(__jump_table) \ - VMLINUX_SYMBOL(__stop___jump_table) = .; \ - } - #ifdef CONFIG_PM_TRACE #define TRACEDATA \ . = ALIGN(4); \ diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 0c9653f11c1..e747ecd48e1 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -1,8 +1,6 @@ #ifndef _DYNAMIC_DEBUG_H #define _DYNAMIC_DEBUG_H -#include - /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They * use independent hash functions, to reduce the chance of false positives. 
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 7880f18e4b8..83e745f3ead 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -1,20 +1,43 @@ #ifndef _LINUX_JUMP_LABEL_H #define _LINUX_JUMP_LABEL_H +#include +#include + #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) + +struct jump_label_key { + atomic_t enabled; + struct jump_entry *entries; +#ifdef CONFIG_MODULES + struct jump_label_mod *next; +#endif +}; + # include # define HAVE_JUMP_LABEL #endif enum jump_label_type { + JUMP_LABEL_DISABLE = 0, JUMP_LABEL_ENABLE, - JUMP_LABEL_DISABLE }; struct module; #ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_MODULES +#define JUMP_LABEL_INIT {{ 0 }, NULL, NULL} +#else +#define JUMP_LABEL_INIT {{ 0 }, NULL} +#endif + +static __always_inline bool static_branch(struct jump_label_key *key) +{ + return arch_static_branch(key); +} + extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; @@ -23,37 +46,37 @@ extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); extern void arch_jump_label_text_poke_early(jump_label_t addr); -extern void jump_label_update(unsigned long key, enum jump_label_type type); -extern void jump_label_apply_nops(struct module *mod); extern int jump_label_text_reserved(void *start, void *end); +extern void jump_label_inc(struct jump_label_key *key); +extern void jump_label_dec(struct jump_label_key *key); +extern bool jump_label_enabled(struct jump_label_key *key); +extern void jump_label_apply_nops(struct module *mod); -#define jump_label_enable(key) \ - jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); +#else -#define jump_label_disable(key) \ - jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); +#include -#else +#define JUMP_LABEL_INIT {ATOMIC_INIT(0)} -#define JUMP_LABEL(key, label) \ -do { \ - if (unlikely(*key)) \ - goto label; \ -} while (0) +struct jump_label_key { + atomic_t enabled; +}; -#define jump_label_enable(cond_var) \ -do { \ - *(cond_var) = 1; \ -} while (0) +static __always_inline bool static_branch(struct jump_label_key *key) +{ + if (unlikely(atomic_read(&key->enabled))) + return true; + return false; +} -#define jump_label_disable(cond_var) \ -do { \ - *(cond_var) = 0; \ -} while (0) +static inline void jump_label_inc(struct jump_label_key *key) +{ + atomic_inc(&key->enabled); +} -static inline int jump_label_apply_nops(struct module *mod) +static inline void jump_label_dec(struct jump_label_key *key) { - return 0; + atomic_dec(&key->enabled); } static inline int jump_label_text_reserved(void *start, void *end) @@ -64,16 +87,16 @@ static inline int jump_label_text_reserved(void *start, void *end) static inline void jump_label_lock(void) {} static inline void jump_label_unlock(void) {} -#endif +static inline bool jump_label_enabled(struct jump_label_key *key) +{ + return !!atomic_read(&key->enabled); +} -#define COND_STMT(key, stmt) \ -do { \ - __label__ jl_enabled; \ - JUMP_LABEL(key, jl_enabled); \ - if (0) { \ -jl_enabled: \ - stmt; \ - } \ -} while (0) +static inline int jump_label_apply_nops(struct module *mod) +{ + return 0; +} + +#endif #endif diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h deleted file mode 100644 index e5d012ad92c..00000000000 --- a/include/linux/jump_label_ref.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _LINUX_JUMP_LABEL_REF_H -#define _LINUX_JUMP_LABEL_REF_H - -#include -#include - -#ifdef HAVE_JUMP_LABEL - -static inline void 
jump_label_inc(atomic_t *key) -{ - if (atomic_add_return(1, key) == 1) - jump_label_enable(key); -} - -static inline void jump_label_dec(atomic_t *key) -{ - if (atomic_dec_and_test(key)) - jump_label_disable(key); -} - -#else /* !HAVE_JUMP_LABEL */ - -static inline void jump_label_inc(atomic_t *key) -{ - atomic_inc(key); -} - -static inline void jump_label_dec(atomic_t *key) -{ - atomic_dec(key); -} - -#undef JUMP_LABEL -#define JUMP_LABEL(key, label) \ -do { \ - if (unlikely(__builtin_choose_expr( \ - __builtin_types_compatible_p(typeof(key), atomic_t *), \ - atomic_read((atomic_t *)(key)), *(key)))) \ - goto label; \ -} while (0) - -#endif /* HAVE_JUMP_LABEL */ - -#endif /* _LINUX_JUMP_LABEL_REF_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 311b4dc785a..730b7821690 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -505,7 +505,7 @@ struct perf_guest_info_callbacks { #include #include #include -#include +#include #include #include @@ -1034,7 +1034,7 @@ static inline int is_software_event(struct perf_event *event) return event->pmu->task_ctx_nr == perf_sw_context; } -extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; +extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); @@ -1063,22 +1063,21 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) { struct pt_regs hot_regs; - JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); - return; - -have_event: - if (!regs) { - perf_fetch_caller_regs(&hot_regs); - regs = &hot_regs; + if (static_branch(&perf_swevent_enabled[event_id])) { + if (!regs) { + perf_fetch_caller_regs(&hot_regs); + regs = &hot_regs; + } + __perf_sw_event(event_id, nr, nmi, regs, addr); } - __perf_sw_event(event_id, nr, nmi, regs, addr); } -extern atomic_t perf_sched_events; +extern struct jump_label_key perf_sched_events; static inline void perf_event_task_sched_in(struct task_struct *task) { - COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task)); + if (static_branch(&perf_sched_events)) + __perf_event_task_sched_in(task); } static inline @@ -1086,7 +1085,8 @@ void perf_event_task_sched_out(struct task_struct *task, struct task_struct *nex { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); - COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next)); + if (static_branch(&perf_sched_events)) + __perf_event_task_sched_out(task, next); } extern void perf_event_mmap(struct vm_area_struct *vma); diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 97c84a58efb..d530a4460a0 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -29,7 +29,7 @@ struct tracepoint_func { struct tracepoint { const char *name; /* Tracepoint name */ - int state; /* State. */ + struct jump_label_key key; void (*regfunc)(void); void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; @@ -146,9 +146,7 @@ void tracepoint_update_probe_range(struct tracepoint * const *begin, extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ - JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ - return; \ -do_trace: \ + if (static_branch(&__tracepoint_##name.key)) \ __DO_TRACE(&__tracepoint_##name, \ TP_PROTO(data_proto), \ TP_ARGS(data_args), \ @@ -176,14 +174,14 @@ do_trace: \ * structures, so we create an array of pointers that will be used for iteration * on the tracepoints. 
*/ -#define DEFINE_TRACE_FN(name, reg, unreg) \ - static const char __tpstrtab_##name[] \ - __attribute__((section("__tracepoints_strings"))) = #name; \ - struct tracepoint __tracepoint_##name \ - __attribute__((section("__tracepoints"))) = \ - { __tpstrtab_##name, 0, reg, unreg, NULL }; \ - static struct tracepoint * const __tracepoint_ptr_##name __used \ - __attribute__((section("__tracepoints_ptrs"))) = \ +#define DEFINE_TRACE_FN(name, reg, unreg) \ + static const char __tpstrtab_##name[] \ + __attribute__((section("__tracepoints_strings"))) = #name; \ + struct tracepoint __tracepoint_##name \ + __attribute__((section("__tracepoints"))) = \ + { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\ + static struct tracepoint * const __tracepoint_ptr_##name __used \ + __attribute__((section("__tracepoints_ptrs"))) = \ &__tracepoint_##name; #define DEFINE_TRACE(name) \ diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 3b79bd93833..74d1c099fbd 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -2,43 +2,23 @@ * jump label support * * Copyright (C) 2009 Jason Baron + * Copyright (C) 2011 Peter Zijlstra * */ -#include #include #include #include #include -#include #include #include #include +#include #ifdef HAVE_JUMP_LABEL -#define JUMP_LABEL_HASH_BITS 6 -#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS) -static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE]; - /* mutex to protect coming/going of the the jump_label table */ static DEFINE_MUTEX(jump_label_mutex); -struct jump_label_entry { - struct hlist_node hlist; - struct jump_entry *table; - int nr_entries; - /* hang modules off here */ - struct hlist_head modules; - unsigned long key; -}; - -struct jump_label_module_entry { - struct hlist_node hlist; - struct jump_entry *table; - int nr_entries; - struct module *mod; -}; - void jump_label_lock(void) { mutex_lock(&jump_label_mutex); @@ -49,6 +29,11 @@ void jump_label_unlock(void) mutex_unlock(&jump_label_mutex); } +bool jump_label_enabled(struct jump_label_key *key) +{ + return !!atomic_read(&key->enabled); +} + static int jump_label_cmp(const void *a, const void *b) { const struct jump_entry *jea = a; @@ -64,7 +49,7 @@ static int jump_label_cmp(const void *a, const void *b) } static void -sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) +jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop) { unsigned long size; @@ -73,118 +58,25 @@ sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); } -static struct jump_label_entry *get_jump_label_entry(jump_label_t key) -{ - struct hlist_head *head; - struct hlist_node *node; - struct jump_label_entry *e; - u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0); - - head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; - hlist_for_each_entry(e, node, head, hlist) { - if (key == e->key) - return e; - } - return NULL; -} +static void jump_label_update(struct jump_label_key *key, int enable); -static struct jump_label_entry * -add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table) +void jump_label_inc(struct jump_label_key *key) { - struct hlist_head *head; - struct jump_label_entry *e; - u32 hash; - - e = get_jump_label_entry(key); - if (e) - return ERR_PTR(-EEXIST); - - e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL); - if (!e) - return ERR_PTR(-ENOMEM); - - hash = jhash((void *)&key, sizeof(jump_label_t), 0); - head = 
&jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; - e->key = key; - e->table = table; - e->nr_entries = nr_entries; - INIT_HLIST_HEAD(&(e->modules)); - hlist_add_head(&e->hlist, head); - return e; -} + if (atomic_inc_not_zero(&key->enabled)) + return; -static int -build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop) -{ - struct jump_entry *iter, *iter_begin; - struct jump_label_entry *entry; - int count; - - sort_jump_label_entries(start, stop); - iter = start; - while (iter < stop) { - entry = get_jump_label_entry(iter->key); - if (!entry) { - iter_begin = iter; - count = 0; - while ((iter < stop) && - (iter->key == iter_begin->key)) { - iter++; - count++; - } - entry = add_jump_label_entry(iter_begin->key, - count, iter_begin); - if (IS_ERR(entry)) - return PTR_ERR(entry); - } else { - WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n"); - return -1; - } - } - return 0; + jump_label_lock(); + if (atomic_add_return(1, &key->enabled) == 1) + jump_label_update(key, JUMP_LABEL_ENABLE); + jump_label_unlock(); } -/*** - * jump_label_update - update jump label text - * @key - key value associated with a a jump label - * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE - * - * Will enable/disable the jump for jump label @key, depending on the - * value of @type. - * - */ - -void jump_label_update(unsigned long key, enum jump_label_type type) +void jump_label_dec(struct jump_label_key *key) { - struct jump_entry *iter; - struct jump_label_entry *entry; - struct hlist_node *module_node; - struct jump_label_module_entry *e_module; - int count; + if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) + return; - jump_label_lock(); - entry = get_jump_label_entry((jump_label_t)key); - if (entry) { - count = entry->nr_entries; - iter = entry->table; - while (count--) { - if (kernel_text_address(iter->code)) - arch_jump_label_transform(iter, type); - iter++; - } - /* eanble/disable jump labels in modules */ - hlist_for_each_entry(e_module, module_node, &(entry->modules), - hlist) { - count = e_module->nr_entries; - iter = e_module->table; - while (count--) { - if (iter->key && - kernel_text_address(iter->code)) - arch_jump_label_transform(iter, type); - iter++; - } - } - } + jump_label_update(key, JUMP_LABEL_DISABLE); jump_label_unlock(); } @@ -197,77 +89,33 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end) return 0; } -#ifdef CONFIG_MODULES - -static int module_conflict(void *start, void *end) +static int __jump_label_text_reserved(struct jump_entry *iter_start, + struct jump_entry *iter_stop, void *start, void *end) { - struct hlist_head *head; - struct hlist_node *node, *node_next, *module_node, *module_node_next; - struct jump_label_entry *e; - struct jump_label_module_entry *e_module; struct jump_entry *iter; - int i, count; - int conflict = 0; - - for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { - head = &jump_label_table[i]; - hlist_for_each_entry_safe(e, node, node_next, head, hlist) { - hlist_for_each_entry_safe(e_module, module_node, - module_node_next, - &(e->modules), hlist) { - count = e_module->nr_entries; - iter = e_module->table; - while (count--) { - if (addr_conflict(iter, start, end)) { - conflict = 1; - goto out; - } - iter++; - } - } - } - } -out: - return conflict; -} - -#endif - -/*** - * jump_label_text_reserved - check if addr range is reserved - * @start: start text addr - * @end: end text addr - * - * checks if the text addr located between @start and @end - * overlaps with any of the 
jump label patch addresses. Code - * that wants to modify kernel text should first verify that - * it does not overlap with any of the jump label addresses. - * Caller must hold jump_label_mutex. - * - * returns 1 if there is an overlap, 0 otherwise - */ -int jump_label_text_reserved(void *start, void *end) -{ - struct jump_entry *iter; - struct jump_entry *iter_start = __start___jump_table; - struct jump_entry *iter_stop = __start___jump_table; - int conflict = 0; iter = iter_start; while (iter < iter_stop) { - if (addr_conflict(iter, start, end)) { - conflict = 1; - goto out; - } + if (addr_conflict(iter, start, end)) + return 1; iter++; } - /* now check modules */ -#ifdef CONFIG_MODULES - conflict = module_conflict(start, end); -#endif -out: - return conflict; + return 0; +} + +static void __jump_label_update(struct jump_label_key *key, + struct jump_entry *entry, int enable) +{ + for (; entry->key == (jump_label_t)(unsigned long)key; entry++) { + /* + * entry->code set to 0 invalidates module init text sections + * kernel_text_address() verifies we are not in core kernel + * init code, see jump_label_invalidate_module_init(). + */ + if (entry->code && kernel_text_address(entry->code)) + arch_jump_label_transform(entry, enable); + } } /* @@ -277,142 +125,173 @@ void __weak arch_jump_label_text_poke_early(jump_label_t addr) { } -static __init int init_jump_label(void) +static __init int jump_label_init(void) { - int ret; struct jump_entry *iter_start = __start___jump_table; struct jump_entry *iter_stop = __stop___jump_table; + struct jump_label_key *key = NULL; struct jump_entry *iter; jump_label_lock(); - ret = build_jump_label_hashtable(__start___jump_table, - __stop___jump_table); - iter = iter_start; - while (iter < iter_stop) { + jump_label_sort_entries(iter_start, iter_stop); + + for (iter = iter_start; iter < iter_stop; iter++) { arch_jump_label_text_poke_early(iter->code); - iter++; + if (iter->key == (jump_label_t)(unsigned long)key) + continue; + + key = (struct jump_label_key *)(unsigned long)iter->key; + atomic_set(&key->enabled, 0); + key->entries = iter; +#ifdef CONFIG_MODULES + key->next = NULL; +#endif } jump_label_unlock(); - return ret; + + return 0; } -early_initcall(init_jump_label); +early_initcall(jump_label_init); #ifdef CONFIG_MODULES -static struct jump_label_module_entry * -add_jump_label_module_entry(struct jump_label_entry *entry, - struct jump_entry *iter_begin, - int count, struct module *mod) +struct jump_label_mod { + struct jump_label_mod *next; + struct jump_entry *entries; + struct module *mod; +}; + +static int __jump_label_mod_text_reserved(void *start, void *end) +{ + struct module *mod; + + mod = __module_text_address((unsigned long)start); + if (!mod) + return 0; + + WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); + + return __jump_label_text_reserved(mod->jump_entries, + mod->jump_entries + mod->num_jump_entries, + start, end); +} + +static void __jump_label_mod_update(struct jump_label_key *key, int enable) +{ + struct jump_label_mod *mod = key->next; + + while (mod) { + __jump_label_update(key, mod->entries, enable); + mod = mod->next; + } +} + +/*** + * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() + * @mod: module to patch + * + * Allow for run-time selection of the optimal nops. Before the module + * loads patch these with arch_get_jump_label_nop(), which is specified by + * the arch specific jump label code. 
+ */ +void jump_label_apply_nops(struct module *mod) { - struct jump_label_module_entry *e; - - e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL); - if (!e) - return ERR_PTR(-ENOMEM); - e->mod = mod; - e->nr_entries = count; - e->table = iter_begin; - hlist_add_head(&e->hlist, &entry->modules); - return e; + struct jump_entry *iter_start = mod->jump_entries; + struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; + struct jump_entry *iter; + + /* if the module doesn't have jump label entries, just return */ + if (iter_start == iter_stop) + return; + + for (iter = iter_start; iter < iter_stop; iter++) + arch_jump_label_text_poke_early(iter->code); } -static int add_jump_label_module(struct module *mod) +static int jump_label_add_module(struct module *mod) { - struct jump_entry *iter, *iter_begin; - struct jump_label_entry *entry; - struct jump_label_module_entry *module_entry; - int count; + struct jump_entry *iter_start = mod->jump_entries; + struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; + struct jump_entry *iter; + struct jump_label_key *key = NULL; + struct jump_label_mod *jlm; /* if the module doesn't have jump label entries, just return */ - if (!mod->num_jump_entries) + if (iter_start == iter_stop) return 0; - sort_jump_label_entries(mod->jump_entries, - mod->jump_entries + mod->num_jump_entries); - iter = mod->jump_entries; - while (iter < mod->jump_entries + mod->num_jump_entries) { - entry = get_jump_label_entry(iter->key); - iter_begin = iter; - count = 0; - while ((iter < mod->jump_entries + mod->num_jump_entries) && - (iter->key == iter_begin->key)) { - iter++; - count++; - } - if (!entry) { - entry = add_jump_label_entry(iter_begin->key, 0, NULL); - if (IS_ERR(entry)) - return PTR_ERR(entry); + jump_label_sort_entries(iter_start, iter_stop); + + for (iter = iter_start; iter < iter_stop; iter++) { + if (iter->key == (jump_label_t)(unsigned long)key) + continue; + + key = (struct jump_label_key *)(unsigned long)iter->key; + + if (__module_address(iter->key) == mod) { + atomic_set(&key->enabled, 0); + key->entries = iter; + key->next = NULL; + continue; } - module_entry = add_jump_label_module_entry(entry, iter_begin, - count, mod); - if (IS_ERR(module_entry)) - return PTR_ERR(module_entry); + + jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL); + if (!jlm) + return -ENOMEM; + + jlm->mod = mod; + jlm->entries = iter; + jlm->next = key->next; + key->next = jlm; + + if (jump_label_enabled(key)) + __jump_label_update(key, iter, JUMP_LABEL_ENABLE); } + return 0; } -static void remove_jump_label_module(struct module *mod) +static void jump_label_del_module(struct module *mod) { - struct hlist_head *head; - struct hlist_node *node, *node_next, *module_node, *module_node_next; - struct jump_label_entry *e; - struct jump_label_module_entry *e_module; - int i; + struct jump_entry *iter_start = mod->jump_entries; + struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; + struct jump_entry *iter; + struct jump_label_key *key = NULL; + struct jump_label_mod *jlm, **prev; - /* if the module doesn't have jump label entries, just return */ - if (!mod->num_jump_entries) - return; + for (iter = iter_start; iter < iter_stop; iter++) { + if (iter->key == (jump_label_t)(unsigned long)key) + continue; + + key = (struct jump_label_key *)(unsigned long)iter->key; + + if (__module_address(iter->key) == mod) + continue; + + prev = &key->next; + jlm = key->next; - for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { - head = &jump_label_table[i]; 
- hlist_for_each_entry_safe(e, node, node_next, head, hlist) { - hlist_for_each_entry_safe(e_module, module_node, - module_node_next, - &(e->modules), hlist) { - if (e_module->mod == mod) { - hlist_del(&e_module->hlist); - kfree(e_module); - } - } - if (hlist_empty(&e->modules) && (e->nr_entries == 0)) { - hlist_del(&e->hlist); - kfree(e); - } + while (jlm && jlm->mod != mod) { + prev = &jlm->next; + jlm = jlm->next; + } + + if (jlm) { + *prev = jlm->next; + kfree(jlm); } } } -static void remove_jump_label_module_init(struct module *mod) +static void jump_label_invalidate_module_init(struct module *mod) { - struct hlist_head *head; - struct hlist_node *node, *node_next, *module_node, *module_node_next; - struct jump_label_entry *e; - struct jump_label_module_entry *e_module; + struct jump_entry *iter_start = mod->jump_entries; + struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; - int i, count; - - /* if the module doesn't have jump label entries, just return */ - if (!mod->num_jump_entries) - return; - for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) { - head = &jump_label_table[i]; - hlist_for_each_entry_safe(e, node, node_next, head, hlist) { - hlist_for_each_entry_safe(e_module, module_node, - module_node_next, - &(e->modules), hlist) { - if (e_module->mod != mod) - continue; - count = e_module->nr_entries; - iter = e_module->table; - while (count--) { - if (within_module_init(iter->code, mod)) - iter->key = 0; - iter++; - } - } - } + for (iter = iter_start; iter < iter_stop; iter++) { + if (within_module_init(iter->code, mod)) + iter->code = 0; } } @@ -426,59 +305,77 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val, switch (val) { case MODULE_STATE_COMING: jump_label_lock(); - ret = add_jump_label_module(mod); + ret = jump_label_add_module(mod); if (ret) - remove_jump_label_module(mod); + jump_label_del_module(mod); jump_label_unlock(); break; case MODULE_STATE_GOING: jump_label_lock(); - remove_jump_label_module(mod); + jump_label_del_module(mod); jump_label_unlock(); break; case MODULE_STATE_LIVE: jump_label_lock(); - remove_jump_label_module_init(mod); + jump_label_invalidate_module_init(mod); jump_label_unlock(); break; } - return ret; -} -/*** - * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop() - * @mod: module to patch - * - * Allow for run-time selection of the optimal nops. Before the module - * loads patch these with arch_get_jump_label_nop(), which is specified by - * the arch specific jump label code. 
- */ -void jump_label_apply_nops(struct module *mod) -{ - struct jump_entry *iter; - - /* if the module doesn't have jump label entries, just return */ - if (!mod->num_jump_entries) - return; - - iter = mod->jump_entries; - while (iter < mod->jump_entries + mod->num_jump_entries) { - arch_jump_label_text_poke_early(iter->code); - iter++; - } + return notifier_from_errno(ret); } struct notifier_block jump_label_module_nb = { .notifier_call = jump_label_module_notify, - .priority = 0, + .priority = 1, /* higher than tracepoints */ }; -static __init int init_jump_label_module(void) +static __init int jump_label_init_module(void) { return register_module_notifier(&jump_label_module_nb); } -early_initcall(init_jump_label_module); +early_initcall(jump_label_init_module); #endif /* CONFIG_MODULES */ +/*** + * jump_label_text_reserved - check if addr range is reserved + * @start: start text addr + * @end: end text addr + * + * checks if the text addr located between @start and @end + * overlaps with any of the jump label patch addresses. Code + * that wants to modify kernel text should first verify that + * it does not overlap with any of the jump label addresses. + * Caller must hold jump_label_mutex. + * + * returns 1 if there is an overlap, 0 otherwise + */ +int jump_label_text_reserved(void *start, void *end) +{ + int ret = __jump_label_text_reserved(__start___jump_table, + __stop___jump_table, start, end); + + if (ret) + return ret; + +#ifdef CONFIG_MODULES + ret = __jump_label_mod_text_reserved(start, end); +#endif + return ret; +} + +static void jump_label_update(struct jump_label_key *key, int enable) +{ + struct jump_entry *entry = key->entries; + + /* if there are no users, entry can be NULL */ + if (entry) + __jump_label_update(key, entry, enable); + +#ifdef CONFIG_MODULES + __jump_label_mod_update(key, enable); +#endif +} + #endif diff --git a/kernel/perf_event.c b/kernel/perf_event.c index c75925c4d1e..d665e92fbd4 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -125,7 +125,7 @@ enum event_type_t { * perf_sched_events : >0 events exist * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu */ -atomic_t perf_sched_events __read_mostly; +struct jump_label_key perf_sched_events __read_mostly; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static atomic_t nr_mmap_events __read_mostly; @@ -5417,7 +5417,7 @@ fail: return err; } -atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; +struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; static void sw_perf_event_destroy(struct perf_event *event) { diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 68187af4889..b219f1449c5 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -251,9 +251,9 @@ static void set_tracepoint(struct tracepoint_entry **entry, { WARN_ON(strcmp((*entry)->name, elem->name) != 0); - if (elem->regfunc && !elem->state && active) + if (elem->regfunc && !jump_label_enabled(&elem->key) && active) elem->regfunc(); - else if (elem->unregfunc && elem->state && !active) + else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active) elem->unregfunc(); /* @@ -264,13 +264,10 @@ static void set_tracepoint(struct tracepoint_entry **entry, * is used. 
*/ rcu_assign_pointer(elem->funcs, (*entry)->funcs); - if (!elem->state && active) { - jump_label_enable(&elem->state); - elem->state = active; - } else if (elem->state && !active) { - jump_label_disable(&elem->state); - elem->state = active; - } + if (active && !jump_label_enabled(&elem->key)) + jump_label_inc(&elem->key); + else if (!active && jump_label_enabled(&elem->key)) + jump_label_dec(&elem->key); } /* @@ -281,13 +278,11 @@ static void set_tracepoint(struct tracepoint_entry **entry, */ static void disable_tracepoint(struct tracepoint *elem) { - if (elem->unregfunc && elem->state) + if (elem->unregfunc && jump_label_enabled(&elem->key)) elem->unregfunc(); - if (elem->state) { - jump_label_disable(&elem->state); - elem->state = 0; - } + if (jump_label_enabled(&elem->key)) + jump_label_dec(&elem->key); rcu_assign_pointer(elem->funcs, NULL); } -- cgit v1.2.3 From ef64789413c73f32faa5e5f1bc393e5843b0aa51 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Wed, 16 Mar 2011 15:58:27 -0400 Subject: jump label: Add _ASM_ALIGN for x86 and x86_64 The linker should not be adding holes to word-size-aligned pointers, but out of paranoia we are explicitly specifying that alignment. I have not seen any holes in the jump label section in practice. Signed-off-by: Jason Baron LKML-Reference: Acked-by: Peter Zijlstra Acked-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt --- arch/x86/include/asm/jump_label.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h index f217cee8653..a32b18ce6ea 100644 --- a/arch/x86/include/asm/jump_label.h +++ b/arch/x86/include/asm/jump_label.h @@ -16,6 +16,7 @@ static __always_inline bool arch_static_branch(struct jump_label_key *key) asm goto("1:" JUMP_LABEL_INITIAL_NOP ".pushsection __jump_table, \"aw\" \n\t" + _ASM_ALIGN "\n\t" _ASM_PTR "1b, %l[l_yes], %c0 \n\t" ".popsection \n\t" : : "i" (key) : : l_yes); -- cgit v1.2.3 From 5373db886b791b2bc7811e2c115377916c409a5d Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Wed, 16 Mar 2011 15:58:30 -0400 Subject: jump label: Add s390 support Implement the architecture backend for jump label support on s390. For a shared kernel booted from an NSS, silently disable jump labels because the NSS is read-only. Jump labels will therefore stay disabled in a shared kernel and can't be activated.
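(Aside, not part of the patch above or below: a minimal, hypothetical sketch of how a caller is expected to drive the key-based jump label API that the tracepoint conversion above switches to, and that arch backends such as the s390 one below ultimately service. The key my_key and the helper my_feature_set() are made-up names; only struct jump_label_key, jump_label_enabled(), jump_label_inc() and jump_label_dec() from this series are assumed.)

#include <linux/jump_label.h>

static struct jump_label_key my_key;    /* a static key starts out disabled (enabled == 0) */

static void my_feature_set(bool active)
{
        /* mirrors the set_tracepoint()/disable_tracepoint() logic above */
        if (active && !jump_label_enabled(&my_key))
                jump_label_inc(&my_key);        /* 0 -> 1: patch branch sites in */
        else if (!active && jump_label_enabled(&my_key))
                jump_label_dec(&my_key);        /* 1 -> 0: restore the nops */
}

The arch backends (x86 above, s390 below) supply the actual branch patching behind these calls; without arch support the key degrades to a plain atomic counter test.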
Signed-off-by: Jan Glauber LKML-Reference: <6935d2c41ce111e1719176ed4bbd3dbe4de80855.1300299760.git.jbaron@redhat.com> Acked-by: Peter Zijlstra Signed-off-by: Martin Schwidefsky Signed-off-by: Steven Rostedt --- arch/s390/Kconfig | 1 + arch/s390/include/asm/jump_label.h | 37 ++++++++++++++++++++++++ arch/s390/kernel/Makefile | 2 +- arch/s390/kernel/jump_label.c | 59 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 98 insertions(+), 1 deletion(-) create mode 100644 arch/s390/include/asm/jump_label.h create mode 100644 arch/s390/kernel/jump_label.c diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2508a6f3158..4a7f14079e0 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -88,6 +88,7 @@ config S390 select HAVE_KERNEL_XZ select HAVE_GET_USER_PAGES_FAST select HAVE_ARCH_MUTEX_CPU_RELAX + select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_LOCK diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h new file mode 100644 index 00000000000..95a6cf2b5b6 --- /dev/null +++ b/arch/s390/include/asm/jump_label.h @@ -0,0 +1,37 @@ +#ifndef _ASM_S390_JUMP_LABEL_H +#define _ASM_S390_JUMP_LABEL_H + +#include + +#define JUMP_LABEL_NOP_SIZE 6 + +#ifdef CONFIG_64BIT +#define ASM_PTR ".quad" +#define ASM_ALIGN ".balign 8" +#else +#define ASM_PTR ".long" +#define ASM_ALIGN ".balign 4" +#endif + +static __always_inline bool arch_static_branch(struct jump_label_key *key) +{ + asm goto("0: brcl 0,0\n" + ".pushsection __jump_table, \"aw\"\n" + ASM_ALIGN "\n" + ASM_PTR " 0b, %l[label], %0\n" + ".popsection\n" + : : "X" (key) : : label); + return false; +label: + return true; +} + +typedef unsigned long jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#endif diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 64230bc392f..5ff15dacb57 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ - vdso.o vtime.o sysinfo.o nmi.o sclp.o + vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c new file mode 100644 index 00000000000..44cc06bedf7 --- /dev/null +++ b/arch/s390/kernel/jump_label.c @@ -0,0 +1,59 @@ +/* + * Jump label s390 support + * + * Copyright IBM Corp. 
2011 + * Author(s): Jan Glauber + */ +#include +#include +#include +#include +#include + +#ifdef HAVE_JUMP_LABEL + +struct insn { + u16 opcode; + s32 offset; +} __packed; + +struct insn_args { + unsigned long *target; + struct insn *insn; + ssize_t size; +}; + +static int __arch_jump_label_transform(void *data) +{ + struct insn_args *args = data; + int rc; + + rc = probe_kernel_write(args->target, args->insn, args->size); + WARN_ON_ONCE(rc < 0); + return 0; +} + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + struct insn_args args; + struct insn insn; + + if (type == JUMP_LABEL_ENABLE) { + /* brcl 15,offset */ + insn.opcode = 0xc0f4; + insn.offset = (entry->target - entry->code) >> 1; + } else { + /* brcl 0,0 */ + insn.opcode = 0xc004; + insn.offset = 0; + } + + args.target = (void *) entry->code; + args.insn = &insn; + args.size = JUMP_LABEL_NOP_SIZE; + + stop_machine(__arch_jump_label_transform, &args, NULL); +} + +#endif -- cgit v1.2.3 From c8e5910edf8bbe2e5c6c35a4ef2a578cc7893b25 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Sat, 16 Apr 2011 02:27:55 +0200 Subject: perf, x86: Use ALTERNATIVE() to check for X86_FEATURE_PERFCTR_CORE Using ALTERNATIVE() when checking for X86_FEATURE_PERFCTR_CORE avoids an extra pointer chase and data cache hit. Signed-off-by: Robert Richter Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1302913676-14352-4-git-send-email-robert.richter@amd.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index eed3673a865..224a84f7080 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -31,6 +31,7 @@ #include #include #include +#include #if 0 #undef wrmsrl @@ -363,12 +364,18 @@ again: return new_raw_count; } -/* using X86_FEATURE_PERFCTR_CORE to later implement ALTERNATIVE() here */ static inline int x86_pmu_addr_offset(int index) { - if (boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) - return index << 1; - return index; + int offset; + + /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */ + alternative_io(ASM_NOP2, + "shll $1, %%eax", + X86_FEATURE_PERFCTR_CORE, + "=a" (offset), + "a" (index)); + + return offset; } static inline unsigned int x86_pmu_config_addr(int index) -- cgit v1.2.3 From aeafcbaf4fcfeb74aeed65609ea5ead48dfc09f8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 31 Mar 2011 10:56:28 -0300 Subject: perf symbols: Give more useful names to 'self' parameters One more installment on an area that is mostly dormant. 
Suggested-by: Thomas Gleixner Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Tom Zanussi LKML-Reference: Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol.c | 629 ++++++++++++++++++++++++----------------------- tools/perf/util/symbol.h | 78 +++--- 2 files changed, 361 insertions(+), 346 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index f06c10f092b..516876dfbe5 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -31,13 +31,13 @@ #define NT_GNU_BUILD_ID 3 #endif -static bool dso__build_id_equal(const struct dso *self, u8 *build_id); +static bool dso__build_id_equal(const struct dso *dso, u8 *build_id); static int elf_read_build_id(Elf *elf, void *bf, size_t size); static void dsos__add(struct list_head *head, struct dso *dso); static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); -static int dso__load_kernel_sym(struct dso *self, struct map *map, +static int dso__load_kernel_sym(struct dso *dso, struct map *map, symbol_filter_t filter); -static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, +static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, symbol_filter_t filter); static int vmlinux_path__nr_entries; static char **vmlinux_path; @@ -49,27 +49,27 @@ struct symbol_conf symbol_conf = { .symfs = "", }; -int dso__name_len(const struct dso *self) +int dso__name_len(const struct dso *dso) { if (verbose) - return self->long_name_len; + return dso->long_name_len; - return self->short_name_len; + return dso->short_name_len; } -bool dso__loaded(const struct dso *self, enum map_type type) +bool dso__loaded(const struct dso *dso, enum map_type type) { - return self->loaded & (1 << type); + return dso->loaded & (1 << type); } -bool dso__sorted_by_name(const struct dso *self, enum map_type type) +bool dso__sorted_by_name(const struct dso *dso, enum map_type type) { - return self->sorted_by_name & (1 << type); + return dso->sorted_by_name & (1 << type); } -static void dso__set_sorted_by_name(struct dso *self, enum map_type type) +static void dso__set_sorted_by_name(struct dso *dso, enum map_type type) { - self->sorted_by_name |= (1 << type); + dso->sorted_by_name |= (1 << type); } bool symbol_type__is_a(char symbol_type, enum map_type map_type) @@ -84,9 +84,9 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type) } } -static void symbols__fixup_end(struct rb_root *self) +static void symbols__fixup_end(struct rb_root *symbols) { - struct rb_node *nd, *prevnd = rb_first(self); + struct rb_node *nd, *prevnd = rb_first(symbols); struct symbol *curr, *prev; if (prevnd == NULL) @@ -107,10 +107,10 @@ static void symbols__fixup_end(struct rb_root *self) curr->end = roundup(curr->start, 4096); } -static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) +static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) { struct map *prev, *curr; - struct rb_node *nd, *prevnd = rb_first(&self->maps[type]); + struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); if (prevnd == NULL) return; @@ -130,128 +130,128 @@ static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) curr->end = ~0ULL; } -static void map_groups__fixup_end(struct map_groups *self) +static void map_groups__fixup_end(struct map_groups *mg) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) - __map_groups__fixup_end(self, i); + __map_groups__fixup_end(mg, 
i); } static struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) { size_t namelen = strlen(name) + 1; - struct symbol *self = calloc(1, (symbol_conf.priv_size + - sizeof(*self) + namelen)); - if (self == NULL) + struct symbol *sym = calloc(1, (symbol_conf.priv_size + + sizeof(*sym) + namelen)); + if (sym == NULL) return NULL; if (symbol_conf.priv_size) - self = ((void *)self) + symbol_conf.priv_size; - - self->start = start; - self->end = len ? start + len - 1 : start; - self->binding = binding; - self->namelen = namelen - 1; + sym = ((void *)sym) + symbol_conf.priv_size; - pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", __func__, name, start, self->end); + sym->start = start; + sym->end = len ? start + len - 1 : start; + sym->binding = binding; + sym->namelen = namelen - 1; - memcpy(self->name, name, namelen); + pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", + __func__, name, start, sym->end); + memcpy(sym->name, name, namelen); - return self; + return sym; } -void symbol__delete(struct symbol *self) +void symbol__delete(struct symbol *sym) { - free(((void *)self) - symbol_conf.priv_size); + free(((void *)sym) - symbol_conf.priv_size); } -static size_t symbol__fprintf(struct symbol *self, FILE *fp) +static size_t symbol__fprintf(struct symbol *sym, FILE *fp) { return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n", - self->start, self->end, - self->binding == STB_GLOBAL ? 'g' : - self->binding == STB_LOCAL ? 'l' : 'w', - self->name); + sym->start, sym->end, + sym->binding == STB_GLOBAL ? 'g' : + sym->binding == STB_LOCAL ? 'l' : 'w', + sym->name); } -void dso__set_long_name(struct dso *self, char *name) +void dso__set_long_name(struct dso *dso, char *name) { if (name == NULL) return; - self->long_name = name; - self->long_name_len = strlen(name); + dso->long_name = name; + dso->long_name_len = strlen(name); } -static void dso__set_short_name(struct dso *self, const char *name) +static void dso__set_short_name(struct dso *dso, const char *name) { if (name == NULL) return; - self->short_name = name; - self->short_name_len = strlen(name); + dso->short_name = name; + dso->short_name_len = strlen(name); } -static void dso__set_basename(struct dso *self) +static void dso__set_basename(struct dso *dso) { - dso__set_short_name(self, basename(self->long_name)); + dso__set_short_name(dso, basename(dso->long_name)); } struct dso *dso__new(const char *name) { - struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1); + struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); - if (self != NULL) { + if (dso != NULL) { int i; - strcpy(self->name, name); - dso__set_long_name(self, self->name); - dso__set_short_name(self, self->name); + strcpy(dso->name, name); + dso__set_long_name(dso, dso->name); + dso__set_short_name(dso, dso->name); for (i = 0; i < MAP__NR_TYPES; ++i) - self->symbols[i] = self->symbol_names[i] = RB_ROOT; - self->symtab_type = SYMTAB__NOT_FOUND; - self->loaded = 0; - self->sorted_by_name = 0; - self->has_build_id = 0; - self->kernel = DSO_TYPE_USER; - INIT_LIST_HEAD(&self->node); + dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; + dso->symtab_type = SYMTAB__NOT_FOUND; + dso->loaded = 0; + dso->sorted_by_name = 0; + dso->has_build_id = 0; + dso->kernel = DSO_TYPE_USER; + INIT_LIST_HEAD(&dso->node); } - return self; + return dso; } -static void symbols__delete(struct rb_root *self) +static void symbols__delete(struct rb_root *symbols) { struct symbol *pos; - struct rb_node *next = rb_first(self); + struct rb_node *next = rb_first(symbols); while 
(next) { pos = rb_entry(next, struct symbol, rb_node); next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, self); + rb_erase(&pos->rb_node, symbols); symbol__delete(pos); } } -void dso__delete(struct dso *self) +void dso__delete(struct dso *dso) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) - symbols__delete(&self->symbols[i]); - if (self->sname_alloc) - free((char *)self->short_name); - if (self->lname_alloc) - free(self->long_name); - free(self); + symbols__delete(&dso->symbols[i]); + if (dso->sname_alloc) + free((char *)dso->short_name); + if (dso->lname_alloc) + free(dso->long_name); + free(dso); } -void dso__set_build_id(struct dso *self, void *build_id) +void dso__set_build_id(struct dso *dso, void *build_id) { - memcpy(self->build_id, build_id, sizeof(self->build_id)); - self->has_build_id = 1; + memcpy(dso->build_id, build_id, sizeof(dso->build_id)); + dso->has_build_id = 1; } -static void symbols__insert(struct rb_root *self, struct symbol *sym) +static void symbols__insert(struct rb_root *symbols, struct symbol *sym) { - struct rb_node **p = &self->rb_node; + struct rb_node **p = &symbols->rb_node; struct rb_node *parent = NULL; const u64 ip = sym->start; struct symbol *s; @@ -265,17 +265,17 @@ static void symbols__insert(struct rb_root *self, struct symbol *sym) p = &(*p)->rb_right; } rb_link_node(&sym->rb_node, parent, p); - rb_insert_color(&sym->rb_node, self); + rb_insert_color(&sym->rb_node, symbols); } -static struct symbol *symbols__find(struct rb_root *self, u64 ip) +static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) { struct rb_node *n; - if (self == NULL) + if (symbols == NULL) return NULL; - n = self->rb_node; + n = symbols->rb_node; while (n) { struct symbol *s = rb_entry(n, struct symbol, rb_node); @@ -296,9 +296,9 @@ struct symbol_name_rb_node { struct symbol sym; }; -static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) +static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) { - struct rb_node **p = &self->rb_node; + struct rb_node **p = &symbols->rb_node; struct rb_node *parent = NULL; struct symbol_name_rb_node *symn, *s; @@ -313,27 +313,29 @@ static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) p = &(*p)->rb_right; } rb_link_node(&symn->rb_node, parent, p); - rb_insert_color(&symn->rb_node, self); + rb_insert_color(&symn->rb_node, symbols); } -static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source) +static void symbols__sort_by_name(struct rb_root *symbols, + struct rb_root *source) { struct rb_node *nd; for (nd = rb_first(source); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); - symbols__insert_by_name(self, pos); + symbols__insert_by_name(symbols, pos); } } -static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name) +static struct symbol *symbols__find_by_name(struct rb_root *symbols, + const char *name) { struct rb_node *n; - if (self == NULL) + if (symbols == NULL) return NULL; - n = self->rb_node; + n = symbols->rb_node; while (n) { struct symbol_name_rb_node *s; @@ -353,29 +355,29 @@ static struct symbol *symbols__find_by_name(struct rb_root *self, const char *na return NULL; } -struct symbol *dso__find_symbol(struct dso *self, +struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, u64 addr) { - return symbols__find(&self->symbols[type], addr); + return symbols__find(&dso->symbols[type], addr); } -struct symbol *dso__find_symbol_by_name(struct dso *self, enum 
map_type type, +struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, const char *name) { - return symbols__find_by_name(&self->symbol_names[type], name); + return symbols__find_by_name(&dso->symbol_names[type], name); } -void dso__sort_by_name(struct dso *self, enum map_type type) +void dso__sort_by_name(struct dso *dso, enum map_type type) { - dso__set_sorted_by_name(self, type); - return symbols__sort_by_name(&self->symbol_names[type], - &self->symbols[type]); + dso__set_sorted_by_name(dso, type); + return symbols__sort_by_name(&dso->symbol_names[type], + &dso->symbols[type]); } -int build_id__sprintf(const u8 *self, int len, char *bf) +int build_id__sprintf(const u8 *build_id, int len, char *bf) { char *bid = bf; - const u8 *raw = self; + const u8 *raw = build_id; int i; for (i = 0; i < len; ++i) { @@ -384,24 +386,25 @@ int build_id__sprintf(const u8 *self, int len, char *bf) bid += 2; } - return raw - self; + return raw - build_id; } -size_t dso__fprintf_buildid(struct dso *self, FILE *fp) +size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; - build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); + build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); return fprintf(fp, "%s", sbuild_id); } -size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp) +size_t dso__fprintf_symbols_by_name(struct dso *dso, + enum map_type type, FILE *fp) { size_t ret = 0; struct rb_node *nd; struct symbol_name_rb_node *pos; - for (nd = rb_first(&self->symbol_names[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) { pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); fprintf(fp, "%s\n", pos->sym.name); } @@ -409,18 +412,18 @@ size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE * return ret; } -size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) +size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) { struct rb_node *nd; - size_t ret = fprintf(fp, "dso: %s (", self->short_name); + size_t ret = fprintf(fp, "dso: %s (", dso->short_name); - if (self->short_name != self->long_name) - ret += fprintf(fp, "%s, ", self->long_name); + if (dso->short_name != dso->long_name) + ret += fprintf(fp, "%s, ", dso->long_name); ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], - self->loaded ? "" : "NOT "); - ret += dso__fprintf_buildid(self, fp); + dso->loaded ? "" : "NOT "); + ret += dso__fprintf_buildid(dso, fp); ret += fprintf(fp, ")\n"); - for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { struct symbol *pos = rb_entry(nd, struct symbol, rb_node); ret += symbol__fprintf(pos, fp); } @@ -543,10 +546,10 @@ static int map__process_kallsym_symbol(void *arg, const char *name, * so that we can in the next step set the symbol ->end address and then * call kernel_maps__split_kallsyms. 
*/ -static int dso__load_all_kallsyms(struct dso *self, const char *filename, +static int dso__load_all_kallsyms(struct dso *dso, const char *filename, struct map *map) { - struct process_kallsyms_args args = { .map = map, .dso = self, }; + struct process_kallsyms_args args = { .map = map, .dso = dso, }; return kallsyms__parse(filename, &args, map__process_kallsym_symbol); } @@ -555,7 +558,7 @@ static int dso__load_all_kallsyms(struct dso *self, const char *filename, * kernel range is broken in several maps, named [kernel].N, as we don't have * the original ELF section names vmlinux have. */ -static int dso__split_kallsyms(struct dso *self, struct map *map, +static int dso__split_kallsyms(struct dso *dso, struct map *map, symbol_filter_t filter) { struct map_groups *kmaps = map__kmap(map)->kmaps; @@ -563,7 +566,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, struct map *curr_map = map; struct symbol *pos; int count = 0, moved = 0; - struct rb_root *root = &self->symbols[map->type]; + struct rb_root *root = &dso->symbols[map->type]; struct rb_node *next = rb_first(root); int kernel_range = 0; @@ -582,7 +585,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, if (strcmp(curr_map->dso->short_name, module)) { if (curr_map != map && - self->kernel == DSO_TYPE_GUEST_KERNEL && + dso->kernel == DSO_TYPE_GUEST_KERNEL && machine__is_default_guest(machine)) { /* * We assume all symbols of a module are @@ -618,14 +621,14 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, pos->end = curr_map->map_ip(curr_map, pos->end); } else if (curr_map != map) { char dso_name[PATH_MAX]; - struct dso *dso; + struct dso *ndso; if (count == 0) { curr_map = map; goto filter_symbol; } - if (self->kernel == DSO_TYPE_GUEST_KERNEL) + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) snprintf(dso_name, sizeof(dso_name), "[guest.kernel].%d", kernel_range++); @@ -634,15 +637,15 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, "[kernel].%d", kernel_range++); - dso = dso__new(dso_name); - if (dso == NULL) + ndso = dso__new(dso_name); + if (ndso == NULL) return -1; - dso->kernel = self->kernel; + ndso->kernel = dso->kernel; - curr_map = map__new2(pos->start, dso, map->type); + curr_map = map__new2(pos->start, ndso, map->type); if (curr_map == NULL) { - dso__delete(dso); + dso__delete(ndso); return -1; } @@ -665,7 +668,7 @@ discard_symbol: rb_erase(&pos->rb_node, root); } if (curr_map != map && - self->kernel == DSO_TYPE_GUEST_KERNEL && + dso->kernel == DSO_TYPE_GUEST_KERNEL && machine__is_default_guest(kmaps->machine)) { dso__set_loaded(curr_map->dso, curr_map->type); } @@ -673,21 +676,21 @@ discard_symbol: rb_erase(&pos->rb_node, root); return count + moved; } -int dso__load_kallsyms(struct dso *self, const char *filename, +int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, symbol_filter_t filter) { - if (dso__load_all_kallsyms(self, filename, map) < 0) + if (dso__load_all_kallsyms(dso, filename, map) < 0) return -1; - if (self->kernel == DSO_TYPE_GUEST_KERNEL) - self->symtab_type = SYMTAB__GUEST_KALLSYMS; + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + dso->symtab_type = SYMTAB__GUEST_KALLSYMS; else - self->symtab_type = SYMTAB__KALLSYMS; + dso->symtab_type = SYMTAB__KALLSYMS; - return dso__split_kallsyms(self, map, filter); + return dso__split_kallsyms(dso, map, filter); } -static int dso__load_perf_map(struct dso *self, struct map *map, +static int dso__load_perf_map(struct dso *dso, struct map *map, symbol_filter_t filter) { 
char *line = NULL; @@ -695,7 +698,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map, FILE *file; int nr_syms = 0; - file = fopen(self->long_name, "r"); + file = fopen(dso->long_name, "r"); if (file == NULL) goto out_failure; @@ -733,7 +736,7 @@ static int dso__load_perf_map(struct dso *self, struct map *map, if (filter && filter(map, sym)) symbol__delete(sym); else { - symbols__insert(&self->symbols[map->type], sym); + symbols__insert(&dso->symbols[map->type], sym); nr_syms++; } } @@ -752,7 +755,7 @@ out_failure: /** * elf_symtab__for_each_symbol - iterate thru all the symbols * - * @self: struct elf_symtab instance to iterate + * @syms: struct elf_symtab instance to iterate * @idx: uint32_t idx * @sym: GElf_Sym iterator */ @@ -852,7 +855,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, * And always look at the original dso, not at debuginfo packages, that * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). */ -static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, +static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, symbol_filter_t filter) { uint32_t nr_rel_entries, idx; @@ -871,7 +874,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, char name[PATH_MAX]; snprintf(name, sizeof(name), "%s%s", - symbol_conf.symfs, self->long_name); + symbol_conf.symfs, dso->long_name); fd = open(name, O_RDONLY); if (fd < 0) goto out; @@ -947,7 +950,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, if (filter && filter(map, f)) symbol__delete(f); else { - symbols__insert(&self->symbols[map->type], f); + symbols__insert(&dso->symbols[map->type], f); ++nr; } } @@ -969,7 +972,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, if (filter && filter(map, f)) symbol__delete(f); else { - symbols__insert(&self->symbols[map->type], f); + symbols__insert(&dso->symbols[map->type], f); ++nr; } } @@ -985,29 +988,30 @@ out_close: return nr; out: pr_debug("%s: problems reading %s PLT info.\n", - __func__, self->long_name); + __func__, dso->long_name); return 0; } -static bool elf_sym__is_a(GElf_Sym *self, enum map_type type) +static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) { switch (type) { case MAP__FUNCTION: - return elf_sym__is_function(self); + return elf_sym__is_function(sym); case MAP__VARIABLE: - return elf_sym__is_object(self); + return elf_sym__is_object(sym); default: return false; } } -static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type) +static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, + enum map_type type) { switch (type) { case MAP__FUNCTION: - return elf_sec__is_text(self, secstrs); + return elf_sec__is_text(shdr, secstrs); case MAP__VARIABLE: - return elf_sec__is_data(self, secstrs); + return elf_sec__is_data(shdr, secstrs); default: return false; } @@ -1032,13 +1036,13 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) return -1; } -static int dso__load_sym(struct dso *self, struct map *map, const char *name, +static int dso__load_sym(struct dso *dso, struct map *map, const char *name, int fd, symbol_filter_t filter, int kmodule, int want_symtab) { - struct kmap *kmap = self->kernel ? map__kmap(map) : NULL; + struct kmap *kmap = dso->kernel ? 
map__kmap(map) : NULL; struct map *curr_map = map; - struct dso *curr_dso = self; + struct dso *curr_dso = dso; Elf_Data *symstrs, *secstrs; uint32_t nr_syms; int err = -1; @@ -1064,14 +1068,14 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, } /* Always reject images with a mismatched build-id: */ - if (self->has_build_id) { + if (dso->has_build_id) { u8 build_id[BUILD_ID_SIZE]; if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) != BUILD_ID_SIZE) goto out_elf_end; - if (!dso__build_id_equal(self, build_id)) + if (!dso__build_id_equal(dso, build_id)) goto out_elf_end; } @@ -1112,13 +1116,14 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, nr_syms = shdr.sh_size / shdr.sh_entsize; memset(&sym, 0, sizeof(sym)); - if (self->kernel == DSO_TYPE_USER) { - self->adjust_symbols = (ehdr.e_type == ET_EXEC || + if (dso->kernel == DSO_TYPE_USER) { + dso->adjust_symbols = (ehdr.e_type == ET_EXEC || elf_section_by_name(elf, &ehdr, &shdr, ".gnu.prelink_undo", NULL) != NULL); - } else self->adjust_symbols = 0; - + } else { + dso->adjust_symbols = 0; + } elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { struct symbol *f; const char *elf_name = elf_sym__name(&sym, symstrs); @@ -1168,22 +1173,22 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, (sym.st_value & 1)) --sym.st_value; - if (self->kernel != DSO_TYPE_USER || kmodule) { + if (dso->kernel != DSO_TYPE_USER || kmodule) { char dso_name[PATH_MAX]; if (strcmp(section_name, (curr_dso->short_name + - self->short_name_len)) == 0) + dso->short_name_len)) == 0) goto new_symbol; if (strcmp(section_name, ".text") == 0) { curr_map = map; - curr_dso = self; + curr_dso = dso; goto new_symbol; } snprintf(dso_name, sizeof(dso_name), - "%s%s", self->short_name, section_name); + "%s%s", dso->short_name, section_name); curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); if (curr_map == NULL) { @@ -1195,9 +1200,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, curr_dso = dso__new(dso_name); if (curr_dso == NULL) goto out_elf_end; - curr_dso->kernel = self->kernel; - curr_dso->long_name = self->long_name; - curr_dso->long_name_len = self->long_name_len; + curr_dso->kernel = dso->kernel; + curr_dso->long_name = dso->long_name; + curr_dso->long_name_len = dso->long_name_len; curr_map = map__new2(start, curr_dso, map->type); if (curr_map == NULL) { @@ -1206,9 +1211,9 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, } curr_map->map_ip = identity__map_ip; curr_map->unmap_ip = identity__map_ip; - curr_dso->symtab_type = self->symtab_type; + curr_dso->symtab_type = dso->symtab_type; map_groups__insert(kmap->kmaps, curr_map); - dsos__add(&self->node, curr_dso); + dsos__add(&dso->node, curr_dso); dso__set_loaded(curr_dso, map->type); } else curr_dso = curr_map->dso; @@ -1250,7 +1255,7 @@ new_symbol: * For misannotated, zeroed, ASM function sizes. 
*/ if (nr > 0) { - symbols__fixup_end(&self->symbols[map->type]); + symbols__fixup_end(&dso->symbols[map->type]); if (kmap) { /* * We need to fixup this here too because we create new @@ -1266,9 +1271,9 @@ out_close: return err; } -static bool dso__build_id_equal(const struct dso *self, u8 *build_id) +static bool dso__build_id_equal(const struct dso *dso, u8 *build_id) { - return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; + return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; } bool __dsos__read_build_ids(struct list_head *head, bool with_hits) @@ -1429,7 +1434,7 @@ out: return err; } -char dso__symtab_origin(const struct dso *self) +char dso__symtab_origin(const struct dso *dso) { static const char origin[] = { [SYMTAB__KALLSYMS] = 'k', @@ -1444,12 +1449,12 @@ char dso__symtab_origin(const struct dso *self) [SYMTAB__GUEST_KMODULE] = 'G', }; - if (self == NULL || self->symtab_type == SYMTAB__NOT_FOUND) + if (dso == NULL || dso->symtab_type == SYMTAB__NOT_FOUND) return '!'; - return origin[self->symtab_type]; + return origin[dso->symtab_type]; } -int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) +int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) { int size = PATH_MAX; char *name; @@ -1459,12 +1464,12 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) const char *root_dir; int want_symtab; - dso__set_loaded(self, map->type); + dso__set_loaded(dso, map->type); - if (self->kernel == DSO_TYPE_KERNEL) - return dso__load_kernel_sym(self, map, filter); - else if (self->kernel == DSO_TYPE_GUEST_KERNEL) - return dso__load_guest_kernel_sym(self, map, filter); + if (dso->kernel == DSO_TYPE_KERNEL) + return dso__load_kernel_sym(dso, map, filter); + else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + return dso__load_guest_kernel_sym(dso, map, filter); if (map->groups && map->groups->machine) machine = map->groups->machine; @@ -1475,11 +1480,11 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) if (!name) return -1; - self->adjust_symbols = 0; + dso->adjust_symbols = 0; - if (strncmp(self->name, "/tmp/perf-", 10) == 0) { - ret = dso__load_perf_map(self, map, filter); - self->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : + if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { + ret = dso__load_perf_map(dso, map, filter); + dso->symtab_type = ret > 0 ? 
SYMTAB__JAVA_JIT : SYMTAB__NOT_FOUND; return ret; } @@ -1490,33 +1495,33 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter) */ want_symtab = 1; restart: - for (self->symtab_type = SYMTAB__BUILD_ID_CACHE; - self->symtab_type != SYMTAB__NOT_FOUND; - self->symtab_type++) { - switch (self->symtab_type) { + for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE; + dso->symtab_type != SYMTAB__NOT_FOUND; + dso->symtab_type++) { + switch (dso->symtab_type) { case SYMTAB__BUILD_ID_CACHE: /* skip the locally configured cache if a symfs is given */ if (symbol_conf.symfs[0] || - (dso__build_id_filename(self, name, size) == NULL)) { + (dso__build_id_filename(dso, name, size) == NULL)) { continue; } break; case SYMTAB__FEDORA_DEBUGINFO: snprintf(name, size, "%s/usr/lib/debug%s.debug", - symbol_conf.symfs, self->long_name); + symbol_conf.symfs, dso->long_name); break; case SYMTAB__UBUNTU_DEBUGINFO: snprintf(name, size, "%s/usr/lib/debug%s", - symbol_conf.symfs, self->long_name); + symbol_conf.symfs, dso->long_name); break; case SYMTAB__BUILDID_DEBUGINFO: { char build_id_hex[BUILD_ID_SIZE * 2 + 1]; - if (!self->has_build_id) + if (!dso->has_build_id) continue; - build_id__sprintf(self->build_id, - sizeof(self->build_id), + build_id__sprintf(dso->build_id, + sizeof(dso->build_id), build_id_hex); snprintf(name, size, "%s/usr/lib/debug/.build-id/%.2s/%s.debug", @@ -1525,7 +1530,7 @@ restart: break; case SYMTAB__SYSTEM_PATH_DSO: snprintf(name, size, "%s%s", - symbol_conf.symfs, self->long_name); + symbol_conf.symfs, dso->long_name); break; case SYMTAB__GUEST_KMODULE: if (map->groups && machine) @@ -1533,12 +1538,12 @@ restart: else root_dir = ""; snprintf(name, size, "%s%s%s", symbol_conf.symfs, - root_dir, self->long_name); + root_dir, dso->long_name); break; case SYMTAB__SYSTEM_PATH_KMODULE: snprintf(name, size, "%s%s", symbol_conf.symfs, - self->long_name); + dso->long_name); break; default:; } @@ -1548,7 +1553,7 @@ restart: if (fd < 0) continue; - ret = dso__load_sym(self, map, name, fd, filter, 0, + ret = dso__load_sym(dso, map, name, fd, filter, 0, want_symtab); close(fd); @@ -1560,7 +1565,8 @@ restart: continue; if (ret > 0) { - int nr_plt = dso__synthesize_plt_symbols(self, map, filter); + int nr_plt = dso__synthesize_plt_symbols(dso, map, + filter); if (nr_plt > 0) ret += nr_plt; break; @@ -1577,17 +1583,17 @@ restart: } free(name); - if (ret < 0 && strstr(self->name, " (deleted)") != NULL) + if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) return 0; return ret; } -struct map *map_groups__find_by_name(struct map_groups *self, +struct map *map_groups__find_by_name(struct map_groups *mg, enum map_type type, const char *name) { struct rb_node *nd; - for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { + for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { struct map *map = rb_entry(nd, struct map, rb_node); if (map->dso && strcmp(map->dso->short_name, name) == 0) @@ -1597,28 +1603,28 @@ struct map *map_groups__find_by_name(struct map_groups *self, return NULL; } -static int dso__kernel_module_get_build_id(struct dso *self, - const char *root_dir) +static int dso__kernel_module_get_build_id(struct dso *dso, + const char *root_dir) { char filename[PATH_MAX]; /* * kernel module short names are of the form "[module]" and * we need just "module" here. 
*/ - const char *name = self->short_name + 1; + const char *name = dso->short_name + 1; snprintf(filename, sizeof(filename), "%s/sys/module/%.*s/notes/.note.gnu.build-id", root_dir, (int)strlen(name) - 1, name); - if (sysfs__read_build_id(filename, self->build_id, - sizeof(self->build_id)) == 0) - self->has_build_id = true; + if (sysfs__read_build_id(filename, dso->build_id, + sizeof(dso->build_id)) == 0) + dso->has_build_id = true; return 0; } -static int map_groups__set_modules_path_dir(struct map_groups *self, +static int map_groups__set_modules_path_dir(struct map_groups *mg, const char *dir_name) { struct dirent *dent; @@ -1646,7 +1652,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *self, snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); - ret = map_groups__set_modules_path_dir(self, path); + ret = map_groups__set_modules_path_dir(mg, path); if (ret < 0) goto out; } else { @@ -1661,7 +1667,8 @@ static int map_groups__set_modules_path_dir(struct map_groups *self, (int)(dot - dent->d_name), dent->d_name); strxfrchar(dso_name, '-', '_'); - map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name); + map = map_groups__find_by_name(mg, MAP__FUNCTION, + dso_name); if (map == NULL) continue; @@ -1711,20 +1718,20 @@ static char *get_kernel_version(const char *root_dir) return strdup(name); } -static int machine__set_modules_path(struct machine *self) +static int machine__set_modules_path(struct machine *machine) { char *version; char modules_path[PATH_MAX]; - version = get_kernel_version(self->root_dir); + version = get_kernel_version(machine->root_dir); if (!version) return -1; snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", - self->root_dir, version); + machine->root_dir, version); free(version); - return map_groups__set_modules_path_dir(&self->kmaps, modules_path); + return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); } /* @@ -1734,23 +1741,23 @@ static int machine__set_modules_path(struct machine *self) */ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) { - struct map *self = calloc(1, (sizeof(*self) + - (dso->kernel ? sizeof(struct kmap) : 0))); - if (self != NULL) { + struct map *map = calloc(1, (sizeof(*map) + + (dso->kernel ? 
sizeof(struct kmap) : 0))); + if (map != NULL) { /* * ->end will be filled after we load all the symbols */ - map__init(self, type, start, 0, 0, dso); + map__init(map, type, start, 0, 0, dso); } - return self; + return map; } -struct map *machine__new_module(struct machine *self, u64 start, +struct map *machine__new_module(struct machine *machine, u64 start, const char *filename) { struct map *map; - struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename); + struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename); if (dso == NULL) return NULL; @@ -1759,15 +1766,15 @@ struct map *machine__new_module(struct machine *self, u64 start, if (map == NULL) return NULL; - if (machine__is_host(self)) + if (machine__is_host(machine)) dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE; else dso->symtab_type = SYMTAB__GUEST_KMODULE; - map_groups__insert(&self->kmaps, map); + map_groups__insert(&machine->kmaps, map); return map; } -static int machine__create_modules(struct machine *self) +static int machine__create_modules(struct machine *machine) { char *line = NULL; size_t n; @@ -1776,10 +1783,10 @@ static int machine__create_modules(struct machine *self) const char *modules; char path[PATH_MAX]; - if (machine__is_default_guest(self)) + if (machine__is_default_guest(machine)) modules = symbol_conf.default_guest_modules; else { - sprintf(path, "%s/proc/modules", self->root_dir); + sprintf(path, "%s/proc/modules", machine->root_dir); modules = path; } @@ -1815,16 +1822,16 @@ static int machine__create_modules(struct machine *self) *sep = '\0'; snprintf(name, sizeof(name), "[%s]", line); - map = machine__new_module(self, start, name); + map = machine__new_module(machine, start, name); if (map == NULL) goto out_delete_line; - dso__kernel_module_get_build_id(map->dso, self->root_dir); + dso__kernel_module_get_build_id(map->dso, machine->root_dir); } free(line); fclose(file); - return machine__set_modules_path(self); + return machine__set_modules_path(machine); out_delete_line: free(line); @@ -1832,7 +1839,7 @@ out_failure: return -1; } -int dso__load_vmlinux(struct dso *self, struct map *map, +int dso__load_vmlinux(struct dso *dso, struct map *map, const char *vmlinux, symbol_filter_t filter) { int err = -1, fd; @@ -1844,9 +1851,9 @@ int dso__load_vmlinux(struct dso *self, struct map *map, if (fd < 0) return -1; - dso__set_long_name(self, (char *)vmlinux); - dso__set_loaded(self, map->type); - err = dso__load_sym(self, map, symfs_vmlinux, fd, filter, 0, 0); + dso__set_long_name(dso, (char *)vmlinux); + dso__set_loaded(dso, map->type); + err = dso__load_sym(dso, map, symfs_vmlinux, fd, filter, 0, 0); close(fd); if (err > 0) @@ -1855,7 +1862,7 @@ int dso__load_vmlinux(struct dso *self, struct map *map, return err; } -int dso__load_vmlinux_path(struct dso *self, struct map *map, +int dso__load_vmlinux_path(struct dso *dso, struct map *map, symbol_filter_t filter) { int i, err = 0; @@ -1864,20 +1871,20 @@ int dso__load_vmlinux_path(struct dso *self, struct map *map, pr_debug("Looking at the vmlinux_path (%d entries long)\n", vmlinux_path__nr_entries + 1); - filename = dso__build_id_filename(self, NULL, 0); + filename = dso__build_id_filename(dso, NULL, 0); if (filename != NULL) { - err = dso__load_vmlinux(self, map, filename, filter); + err = dso__load_vmlinux(dso, map, filename, filter); if (err > 0) { - dso__set_long_name(self, filename); + dso__set_long_name(dso, filename); goto out; } free(filename); } for (i = 0; i < vmlinux_path__nr_entries; ++i) { - err = dso__load_vmlinux(self, map, 
vmlinux_path[i], filter); + err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter); if (err > 0) { - dso__set_long_name(self, strdup(vmlinux_path[i])); + dso__set_long_name(dso, strdup(vmlinux_path[i])); break; } } @@ -1885,7 +1892,7 @@ out: return err; } -static int dso__load_kernel_sym(struct dso *self, struct map *map, +static int dso__load_kernel_sym(struct dso *dso, struct map *map, symbol_filter_t filter) { int err; @@ -1912,10 +1919,10 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, } if (symbol_conf.vmlinux_name != NULL) { - err = dso__load_vmlinux(self, map, + err = dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, filter); if (err > 0) { - dso__set_long_name(self, + dso__set_long_name(dso, strdup(symbol_conf.vmlinux_name)); goto out_fixup; } @@ -1923,7 +1930,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, } if (vmlinux_path != NULL) { - err = dso__load_vmlinux_path(self, map, filter); + err = dso__load_vmlinux_path(dso, map, filter); if (err > 0) goto out_fixup; } @@ -1937,13 +1944,13 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, * we have a build-id, so check if it is the same as the running kernel, * using it if it is. */ - if (self->has_build_id) { + if (dso->has_build_id) { u8 kallsyms_build_id[BUILD_ID_SIZE]; char sbuild_id[BUILD_ID_SIZE * 2 + 1]; if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, sizeof(kallsyms_build_id)) == 0) { - if (dso__build_id_equal(self, kallsyms_build_id)) { + if (dso__build_id_equal(dso, kallsyms_build_id)) { kallsyms_filename = "/proc/kallsyms"; goto do_kallsyms; } @@ -1952,7 +1959,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, * Now look if we have it on the build-id cache in * $HOME/.debug/[kernel.kallsyms]. 
*/ - build_id__sprintf(self->build_id, sizeof(self->build_id), + build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); if (asprintf(&kallsyms_allocated_filename, @@ -1979,7 +1986,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map, } do_kallsyms: - err = dso__load_kallsyms(self, kallsyms_filename, map, filter); + err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); if (err > 0) pr_debug("Using %s for symbols\n", kallsyms_filename); free(kallsyms_allocated_filename); @@ -1987,7 +1994,7 @@ do_kallsyms: if (err > 0) { out_fixup: if (kallsyms_filename != NULL) - dso__set_long_name(self, strdup("[kernel.kallsyms]")); + dso__set_long_name(dso, strdup("[kernel.kallsyms]")); map__fixup_start(map); map__fixup_end(map); } @@ -1995,8 +2002,8 @@ out_fixup: return err; } -static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, - symbol_filter_t filter) +static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, + symbol_filter_t filter) { int err; const char *kallsyms_filename = NULL; @@ -2016,7 +2023,7 @@ static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, * Or use file guest_kallsyms inputted by user on commandline */ if (symbol_conf.default_guest_vmlinux_name != NULL) { - err = dso__load_vmlinux(self, map, + err = dso__load_vmlinux(dso, map, symbol_conf.default_guest_vmlinux_name, filter); goto out_try_fixup; } @@ -2029,7 +2036,7 @@ static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, kallsyms_filename = path; } - err = dso__load_kallsyms(self, kallsyms_filename, map, filter); + err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); if (err > 0) pr_debug("Using %s for symbols\n", kallsyms_filename); @@ -2037,7 +2044,7 @@ out_try_fixup: if (err > 0) { if (kallsyms_filename != NULL) { machine__mmap_name(machine, path, sizeof(path)); - dso__set_long_name(self, strdup(path)); + dso__set_long_name(dso, strdup(path)); } map__fixup_start(map); map__fixup_end(map); @@ -2090,12 +2097,12 @@ size_t __dsos__fprintf(struct list_head *head, FILE *fp) return ret; } -size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp) +size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp) { struct rb_node *nd; size_t ret = 0; - for (nd = rb_first(self); nd; nd = rb_next(nd)) { + for (nd = rb_first(machines); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret += __dsos__fprintf(&pos->kernel_dsos, fp); ret += __dsos__fprintf(&pos->user_dsos, fp); @@ -2119,18 +2126,20 @@ static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, return ret; } -size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits) +size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp, + bool with_hits) { - return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) + - __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits); + return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) + + __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits); } -size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits) +size_t machines__fprintf_dsos_buildid(struct rb_root *machines, + FILE *fp, bool with_hits) { struct rb_node *nd; size_t ret = 0; - for (nd = rb_first(self); nd; nd = rb_next(nd)) { + for (nd = rb_first(machines); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); } @@ -2139,59 +2148,59 @@ 
size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_ struct dso *dso__new_kernel(const char *name) { - struct dso *self = dso__new(name ?: "[kernel.kallsyms]"); + struct dso *dso = dso__new(name ?: "[kernel.kallsyms]"); - if (self != NULL) { - dso__set_short_name(self, "[kernel]"); - self->kernel = DSO_TYPE_KERNEL; + if (dso != NULL) { + dso__set_short_name(dso, "[kernel]"); + dso->kernel = DSO_TYPE_KERNEL; } - return self; + return dso; } static struct dso *dso__new_guest_kernel(struct machine *machine, const char *name) { char bf[PATH_MAX]; - struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf))); - - if (self != NULL) { - dso__set_short_name(self, "[guest.kernel]"); - self->kernel = DSO_TYPE_GUEST_KERNEL; + struct dso *dso = dso__new(name ?: machine__mmap_name(machine, bf, + sizeof(bf))); + if (dso != NULL) { + dso__set_short_name(dso, "[guest.kernel]"); + dso->kernel = DSO_TYPE_GUEST_KERNEL; } - return self; + return dso; } -void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine) +void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) { char path[PATH_MAX]; if (machine__is_default_guest(machine)) return; sprintf(path, "%s/sys/kernel/notes", machine->root_dir); - if (sysfs__read_build_id(path, self->build_id, - sizeof(self->build_id)) == 0) - self->has_build_id = true; + if (sysfs__read_build_id(path, dso->build_id, + sizeof(dso->build_id)) == 0) + dso->has_build_id = true; } -static struct dso *machine__create_kernel(struct machine *self) +static struct dso *machine__create_kernel(struct machine *machine) { const char *vmlinux_name = NULL; struct dso *kernel; - if (machine__is_host(self)) { + if (machine__is_host(machine)) { vmlinux_name = symbol_conf.vmlinux_name; kernel = dso__new_kernel(vmlinux_name); } else { - if (machine__is_default_guest(self)) + if (machine__is_default_guest(machine)) vmlinux_name = symbol_conf.default_guest_vmlinux_name; - kernel = dso__new_guest_kernel(self, vmlinux_name); + kernel = dso__new_guest_kernel(machine, vmlinux_name); } if (kernel != NULL) { - dso__read_running_kernel_build_id(kernel, self); - dsos__add(&self->kernel_dsos, kernel); + dso__read_running_kernel_build_id(kernel, machine); + dsos__add(&machine->kernel_dsos, kernel); } return kernel; } @@ -2236,41 +2245,43 @@ static u64 machine__get_kernel_start_addr(struct machine *machine) return args.start; } -int __machine__create_kernel_maps(struct machine *self, struct dso *kernel) +int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) { enum map_type type; - u64 start = machine__get_kernel_start_addr(self); + u64 start = machine__get_kernel_start_addr(machine); for (type = 0; type < MAP__NR_TYPES; ++type) { struct kmap *kmap; - self->vmlinux_maps[type] = map__new2(start, kernel, type); - if (self->vmlinux_maps[type] == NULL) + machine->vmlinux_maps[type] = map__new2(start, kernel, type); + if (machine->vmlinux_maps[type] == NULL) return -1; - self->vmlinux_maps[type]->map_ip = - self->vmlinux_maps[type]->unmap_ip = identity__map_ip; - - kmap = map__kmap(self->vmlinux_maps[type]); - kmap->kmaps = &self->kmaps; - map_groups__insert(&self->kmaps, self->vmlinux_maps[type]); + machine->vmlinux_maps[type]->map_ip = + machine->vmlinux_maps[type]->unmap_ip = + identity__map_ip; + kmap = map__kmap(machine->vmlinux_maps[type]); + kmap->kmaps = &machine->kmaps; + map_groups__insert(&machine->kmaps, + machine->vmlinux_maps[type]); } return 0; } -void 
machine__destroy_kernel_maps(struct machine *self) +void machine__destroy_kernel_maps(struct machine *machine) { enum map_type type; for (type = 0; type < MAP__NR_TYPES; ++type) { struct kmap *kmap; - if (self->vmlinux_maps[type] == NULL) + if (machine->vmlinux_maps[type] == NULL) continue; - kmap = map__kmap(self->vmlinux_maps[type]); - map_groups__remove(&self->kmaps, self->vmlinux_maps[type]); + kmap = map__kmap(machine->vmlinux_maps[type]); + map_groups__remove(&machine->kmaps, + machine->vmlinux_maps[type]); if (kmap->ref_reloc_sym) { /* * ref_reloc_sym is shared among all maps, so free just @@ -2284,25 +2295,25 @@ void machine__destroy_kernel_maps(struct machine *self) kmap->ref_reloc_sym = NULL; } - map__delete(self->vmlinux_maps[type]); - self->vmlinux_maps[type] = NULL; + map__delete(machine->vmlinux_maps[type]); + machine->vmlinux_maps[type] = NULL; } } -int machine__create_kernel_maps(struct machine *self) +int machine__create_kernel_maps(struct machine *machine) { - struct dso *kernel = machine__create_kernel(self); + struct dso *kernel = machine__create_kernel(machine); if (kernel == NULL || - __machine__create_kernel_maps(self, kernel) < 0) + __machine__create_kernel_maps(machine, kernel) < 0) return -1; - if (symbol_conf.use_modules && machine__create_modules(self) < 0) + if (symbol_conf.use_modules && machine__create_modules(machine) < 0) pr_debug("Problems creating module maps, continuing anyway...\n"); /* * Now that we have all the maps created, just set the ->end of them: */ - map_groups__fixup_end(&self->kmaps); + map_groups__fixup_end(&machine->kmaps); return 0; } @@ -2366,11 +2377,11 @@ out_fail: return -1; } -size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp) +size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) { int i; size_t printed = 0; - struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso; + struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso; if (kdso->has_build_id) { char filename[PATH_MAX]; @@ -2467,9 +2478,9 @@ void symbol__exit(void) symbol_conf.initialized = false; } -int machines__create_kernel_maps(struct rb_root *self, pid_t pid) +int machines__create_kernel_maps(struct rb_root *machines, pid_t pid) { - struct machine *machine = machines__findnew(self, pid); + struct machine *machine = machines__findnew(machines, pid); if (machine == NULL) return -1; @@ -2520,7 +2531,7 @@ char *strxfrchar(char *s, char from, char to) return s; } -int machines__create_guest_kernel_maps(struct rb_root *self) +int machines__create_guest_kernel_maps(struct rb_root *machines) { int ret = 0; struct dirent **namelist = NULL; @@ -2531,7 +2542,7 @@ int machines__create_guest_kernel_maps(struct rb_root *self) if (symbol_conf.default_guest_vmlinux_name || symbol_conf.default_guest_modules || symbol_conf.default_guest_kallsyms) { - machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID); + machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); } if (symbol_conf.guestmount) { @@ -2552,7 +2563,7 @@ int machines__create_guest_kernel_maps(struct rb_root *self) pr_debug("Can't access file %s\n", path); goto failure; } - machines__create_kernel_maps(self, pid); + machines__create_kernel_maps(machines, pid); } failure: free(namelist); @@ -2561,23 +2572,23 @@ failure: return ret; } -void machines__destroy_guest_kernel_maps(struct rb_root *self) +void machines__destroy_guest_kernel_maps(struct rb_root *machines) { - struct rb_node *next = rb_first(self); + struct rb_node *next = rb_first(machines); while (next) { struct 
machine *pos = rb_entry(next, struct machine, rb_node); next = rb_next(&pos->rb_node); - rb_erase(&pos->rb_node, self); + rb_erase(&pos->rb_node, machines); machine__delete(pos); } } -int machine__load_kallsyms(struct machine *self, const char *filename, +int machine__load_kallsyms(struct machine *machine, const char *filename, enum map_type type, symbol_filter_t filter) { - struct map *map = self->vmlinux_maps[type]; + struct map *map = machine->vmlinux_maps[type]; int ret = dso__load_kallsyms(map->dso, filename, map, filter); if (ret > 0) { @@ -2587,16 +2598,16 @@ int machine__load_kallsyms(struct machine *self, const char *filename, * kernel, with modules between them, fixup the end of all * sections. */ - __map_groups__fixup_end(&self->kmaps, type); + __map_groups__fixup_end(&machine->kmaps, type); } return ret; } -int machine__load_vmlinux_path(struct machine *self, enum map_type type, +int machine__load_vmlinux_path(struct machine *machine, enum map_type type, symbol_filter_t filter) { - struct map *map = self->vmlinux_maps[type]; + struct map *map = machine->vmlinux_maps[type]; int ret = dso__load_vmlinux_path(map->dso, map, filter); if (ret > 0) { diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 713b0b40cc4..242de0101a8 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -62,7 +62,7 @@ struct symbol { char name[0]; }; -void symbol__delete(struct symbol *self); +void symbol__delete(struct symbol *sym); struct strlist; @@ -96,9 +96,9 @@ struct symbol_conf { extern struct symbol_conf symbol_conf; -static inline void *symbol__priv(struct symbol *self) +static inline void *symbol__priv(struct symbol *sym) { - return ((void *)self) - symbol_conf.priv_size; + return ((void *)sym) - symbol_conf.priv_size; } struct ref_reloc_sym { @@ -155,43 +155,45 @@ struct dso { struct dso *dso__new(const char *name); struct dso *dso__new_kernel(const char *name); -void dso__delete(struct dso *self); +void dso__delete(struct dso *dso); -int dso__name_len(const struct dso *self); +int dso__name_len(const struct dso *dso); -bool dso__loaded(const struct dso *self, enum map_type type); -bool dso__sorted_by_name(const struct dso *self, enum map_type type); +bool dso__loaded(const struct dso *dso, enum map_type type); +bool dso__sorted_by_name(const struct dso *dso, enum map_type type); -static inline void dso__set_loaded(struct dso *self, enum map_type type) +static inline void dso__set_loaded(struct dso *dso, enum map_type type) { - self->loaded |= (1 << type); + dso->loaded |= (1 << type); } -void dso__sort_by_name(struct dso *self, enum map_type type); +void dso__sort_by_name(struct dso *dso, enum map_type type); struct dso *__dsos__findnew(struct list_head *head, const char *name); -int dso__load(struct dso *self, struct map *map, symbol_filter_t filter); -int dso__load_vmlinux(struct dso *self, struct map *map, +int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); +int dso__load_vmlinux(struct dso *dso, struct map *map, const char *vmlinux, symbol_filter_t filter); -int dso__load_vmlinux_path(struct dso *self, struct map *map, +int dso__load_vmlinux_path(struct dso *dso, struct map *map, symbol_filter_t filter); -int dso__load_kallsyms(struct dso *self, const char *filename, struct map *map, +int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, symbol_filter_t filter); -int machine__load_kallsyms(struct machine *self, const char *filename, +int machine__load_kallsyms(struct machine *machine, const char *filename, 
enum map_type type, symbol_filter_t filter); -int machine__load_vmlinux_path(struct machine *self, enum map_type type, +int machine__load_vmlinux_path(struct machine *machine, enum map_type type, symbol_filter_t filter); size_t __dsos__fprintf(struct list_head *head, FILE *fp); -size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits); -size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp); -size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits); - -size_t dso__fprintf_buildid(struct dso *self, FILE *fp); -size_t dso__fprintf_symbols_by_name(struct dso *self, enum map_type type, FILE *fp); -size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); +size_t machine__fprintf_dsos_buildid(struct machine *machine, + FILE *fp, bool with_hits); +size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp); +size_t machines__fprintf_dsos_buildid(struct rb_root *machines, + FILE *fp, bool with_hits); +size_t dso__fprintf_buildid(struct dso *dso, FILE *fp); +size_t dso__fprintf_symbols_by_name(struct dso *dso, + enum map_type type, FILE *fp); +size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); enum symtab_type { SYMTAB__KALLSYMS = 0, @@ -207,34 +209,36 @@ enum symtab_type { SYMTAB__NOT_FOUND, }; -char dso__symtab_origin(const struct dso *self); -void dso__set_long_name(struct dso *self, char *name); -void dso__set_build_id(struct dso *self, void *build_id); -void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine); -struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr); -struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, +char dso__symtab_origin(const struct dso *dso); +void dso__set_long_name(struct dso *dso, char *name); +void dso__set_build_id(struct dso *dso, void *build_id); +void dso__read_running_kernel_build_id(struct dso *dso, + struct machine *machine); +struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, + u64 addr); +struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, const char *name); int filename__read_build_id(const char *filename, void *bf, size_t size); int sysfs__read_build_id(const char *filename, void *bf, size_t size); bool __dsos__read_build_ids(struct list_head *head, bool with_hits); -int build_id__sprintf(const u8 *self, int len, char *bf); +int build_id__sprintf(const u8 *build_id, int len, char *bf); int kallsyms__parse(const char *filename, void *arg, int (*process_symbol)(void *arg, const char *name, char type, u64 start, u64 end)); -void machine__destroy_kernel_maps(struct machine *self); -int __machine__create_kernel_maps(struct machine *self, struct dso *kernel); -int machine__create_kernel_maps(struct machine *self); +void machine__destroy_kernel_maps(struct machine *machine); +int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); +int machine__create_kernel_maps(struct machine *machine); -int machines__create_kernel_maps(struct rb_root *self, pid_t pid); -int machines__create_guest_kernel_maps(struct rb_root *self); -void machines__destroy_guest_kernel_maps(struct rb_root *self); +int machines__create_kernel_maps(struct rb_root *machines, pid_t pid); +int machines__create_guest_kernel_maps(struct rb_root *machines); +void machines__destroy_guest_kernel_maps(struct rb_root *machines); int symbol__init(void); void symbol__exit(void); bool symbol_type__is_a(char symbol_type, enum map_type map_type); -size_t 
machine__fprintf_vmlinux_path(struct machine *self, FILE *fp); +size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp); #endif /* __PERF_SYMBOL */ -- cgit v1.2.3 From 3643b133f2cb8023e8cedcbef43215a99d7df561 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Sat, 9 Apr 2011 01:12:56 +0000 Subject: perf tools: Makefile: Clean up `python/perf.so' rule There is no need for a subshell or an explicit `export'; as per the POSIX Shell Command Language specification: http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_09_01 http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_10_02 It is only necessary to include the environment variable assignment just before the command to be run. Also, it is better to use single-quotes, because GNU make might expand `$(BASIC_CFLAGS)' into something that the shell could interpret within double-quotes. Acked-by: Raghavendra D Prabhu Link: http://lkml.kernel.org/n/tip-58n38o02ocuzrm9qh096hsf5@git.kernel.org Signed-off-by: Michael Witten Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 207dee5c5b1..aaf4dd3fd80 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -165,12 +165,10 @@ grep-libs = $(filter -l%,$(1)) strip-libs = $(filter-out -l%,$(1)) $(OUTPUT)python/perf.so: $(PYRF_OBJS) - $(QUIET_GEN)( \ - export CFLAGS="$(BASIC_CFLAGS)"; \ - python util/setup.py --quiet build_ext --build-lib='$(OUTPUT)python' \ - --build-temp='$(OUTPUT)python/temp' \ - ) - + $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' python util/setup.py \ + --quiet build_ext \ + --build-lib='$(OUTPUT)python' \ + --build-temp='$(OUTPUT)python/temp' # # No Perl scripts right now: # -- cgit v1.2.3 From ced465c400b23656ef2c4fbfb4add0e5b92e3d97 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Sat, 2 Apr 2011 21:46:09 +0000 Subject: perf tools: Makefile: PYTHON{,_CONFIG} to bandage Python 3 incompatibility Currently, Python 3 is not supported by perf's code; this can cause the build to fail for systems that have Python 3 installed as the default python: python{,-config} The Correct Solution is to write compatibility code so that Python 3 works out-of-the-box. However, users often have an ancillary Python 2 installed: python2{,-config} Therefore, a quick fix is to allow the user to specify those ancillary paths as the python binaries that Makefile should use, thereby avoiding Python 3 altogether; as an added benefit, the Python binaries may be installed in non-standard locations without the need for updating any PATH variable. This commit adds the ability to set PYTHON and/or PYTHON_CONFIG either as environment variables or as make variables on the command line; the paths may be relative, and usually only PYTHON is necessary in order for PYTHON_CONFIG to be defined implicitly. Some rudimentary error checking is performed when the user explicitly specifies a value for any of these variables. In addition, this commit introduces significantly robust makefile infrastructure for working with paths and communicating with the shell; it's currently only used for handling Python, but I hope it will prove useful in refactoring the makefiles. Thanks to: Raghavendra D Prabhu for motivating this patch. 
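As an illustration of the intended usage (the python2 paths below are examples, not mandated by this patch), either variable can be passed on the make command line or in the environment; when only PYTHON is given, PYTHON_CONFIG defaults to `$(PYTHON)-config':

    make PYTHON=python2
    make PYTHON=/usr/local/bin/python2.7 PYTHON_CONFIG=/usr/local/bin/python2.7-config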
Acked-by: Raghavendra D Prabhu Link: http://lkml.kernel.org/r/e987828e-87ec-4973-95e7-47f10f5d9bab-mfwitten@gmail.com Signed-off-by: Michael Witten Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 97 +++++++++++++++++----- tools/perf/config/utilities.mak | 180 ++++++++++++++++++++++++++++++++++++++++ tools/perf/feature-tests.mak | 8 +- 3 files changed, 264 insertions(+), 21 deletions(-) create mode 100644 tools/perf/config/utilities.mak diff --git a/tools/perf/Makefile b/tools/perf/Makefile index aaf4dd3fd80..b5276c7e9be 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -5,6 +5,8 @@ endif # The default target of this Makefile is... all: +include config/utilities.mak + ifneq ($(OUTPUT),) # check that the output directory actually exists OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) @@ -13,6 +15,12 @@ endif # Define V to have a more verbose compile. # +# Define PYTHON to point to the python binary if the default +# `python' is not correct; for example: PYTHON=python2 +# +# Define PYTHON_CONFIG to point to the python-config binary if +# the default `$(PYTHON)-config' is not correct. +# # Define ASCIIDOC8 if you want to format documentation with AsciiDoc 8 # # Define DOCBOOK_XSL_172 if you want to format man pages with DocBook XSL v1.72. @@ -165,7 +173,7 @@ grep-libs = $(filter -l%,$(1)) strip-libs = $(filter-out -l%,$(1)) $(OUTPUT)python/perf.so: $(PYRF_OBJS) - $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' python util/setup.py \ + $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ --quiet build_ext \ --build-lib='$(OUTPUT)python' \ --build-temp='$(OUTPUT)python/temp' @@ -473,24 +481,74 @@ else endif endif -ifdef NO_LIBPYTHON - BASIC_CFLAGS += -DNO_LIBPYTHON +disable-python = $(eval $(disable-python_code)) +define disable-python_code + BASIC_CFLAGS += -DNO_LIBPYTHON + $(if $(1),$(warning No $(1) was found)) + $(warning Python support won't be built) +endef + +override PYTHON := \ + $(call get-executable-or-default,PYTHON,python) + +ifndef PYTHON + $(call disable-python,python interpreter) + python-clean := else - PYTHON_EMBED_LDOPTS = $(shell python-config --ldflags 2>/dev/null) - PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) - PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) - PYTHON_EMBED_CCOPTS = `python-config --cflags 2>/dev/null` - FLAGS_PYTHON_EMBED=$(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) - ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y) - msg := $(warning No Python.h found, install python-dev[el] to have python support in 'perf script' and to build the python bindings) - BASIC_CFLAGS += -DNO_LIBPYTHON - else - ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS) - EXTLIBS += $(PYTHON_EMBED_LIBADD) - LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o - LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o - LANG_BINDINGS += $(OUTPUT)python/perf.so - endif + + PYTHON_WORD := $(call shell-wordify,$(PYTHON)) + + python-clean := $(PYTHON_WORD) util/setup.py clean \ + --build-lib='$(OUTPUT)python' \ + --build-temp='$(OUTPUT)python/temp' + + ifdef NO_LIBPYTHON + $(call disable-python) + else + + override PYTHON_CONFIG := \ + $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON)-config) + + ifndef PYTHON_CONFIG + $(call disable-python,python-config tool) + else + + PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG)) + + PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null) + PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) + PYTHON_EMBED_LIBADD := 
$(call grep-libs,$(PYTHON_EMBED_LDOPTS)) + PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) + FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) + + ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED)),y) + $(call disable-python,Python.h (for Python 2.x)) + else + + ifneq ($(call try-cc,$(SOURCE_PYTHON_VERSION),$(FLAGS_PYTHON_EMBED)),y) + $(warning Python 3 is not yet supported; please set) + $(warning PYTHON and/or PYTHON_CONFIG appropriately.) + $(warning If you also have Python 2 installed, then) + $(warning try something like:) + $(warning $(and ,)) + $(warning $(and ,) make PYTHON=python2) + $(warning $(and ,)) + $(warning Otherwise, disable Python support entirely:) + $(warning $(and ,)) + $(warning $(and ,) make NO_LIBPYTHON=1) + $(warning $(and ,)) + $(error $(and ,)) + else + ALL_LDFLAGS += $(PYTHON_EMBED_LDFLAGS) + EXTLIBS += $(PYTHON_EMBED_LIBADD) + LIB_OBJS += $(OUTPUT)util/scripting-engines/trace-event-python.o + LIB_OBJS += $(OUTPUT)scripts/python/Perf-Trace-Util/Context.o + LANG_BINDINGS += $(OUTPUT)python/perf.so + endif + + endif + endif + endif endif ifdef NO_DEMANGLE @@ -831,8 +889,7 @@ clean: $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(MAKE) -C Documentation/ clean $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS - @python util/setup.py clean --build-lib='$(OUTPUT)python' \ - --build-temp='$(OUTPUT)python/temp' + $(python-clean) .PHONY: all install clean strip .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak new file mode 100644 index 00000000000..6d8ff887840 --- /dev/null +++ b/tools/perf/config/utilities.mak @@ -0,0 +1,180 @@ +# This allows us to work with the newline character: +define newline + + +endef +newline := $(newline) + +# nl-escape +# +# Usage: escape = $(call nl-escape[,escape]) +# +# This is used as the common way to specify +# what should replace a newline when escaping +# newlines; the default is a bizarre string. +# +nl-escape = $(or $(1),m822df3020w6a44id34bt574ctac44eb9f4n) + +# escape-nl +# +# Usage: escaped-text = $(call escape-nl,text[,escape]) +# +# GNU make's $(shell ...) function converts to a +# single space each newline character in the output +# produced during the expansion; this may not be +# desirable. +# +# The only solution is to change each newline into +# something that won't be converted, so that the +# information can be recovered later with +# $(call unescape-nl...) +# +escape-nl = $(subst $(newline),$(call nl-escape,$(2)),$(1)) + +# unescape-nl +# +# Usage: text = $(call unescape-nl,escaped-text[,escape]) +# +# See escape-nl. +# +unescape-nl = $(subst $(call nl-escape,$(2)),$(newline),$(1)) + +# shell-escape-nl +# +# Usage: $(shell some-command | $(call shell-escape-nl[,escape])) +# +# Use this to escape newlines from within a shell call; +# the default escape is a bizarre string. +# +# NOTE: The escape is used directly as a string constant +# in an `awk' program that is delimited by shell +# single-quotes, so be wary of the characters +# that are chosen. +# +define shell-escape-nl +awk 'NR==1 {t=$$0} NR>1 {t=t "$(nl-escape)" $$0} END {printf t}' +endef + +# shell-unescape-nl +# +# Usage: $(shell some-command | $(call shell-unescape-nl[,escape])) +# +# Use this to unescape newlines from within a shell call; +# the default escape is a bizarre string. 
+# +# NOTE: The escape is used directly as an extended regular +# expression constant in an `awk' program that is +# delimited by shell single-quotes, so be wary +# of the characters that are chosen. +# +# (The bash shell has a bug where `{gsub(...),...}' is +# misinterpreted as a brace expansion; this can be +# overcome by putting a space between `{' and `gsub'). +# +define shell-unescape-nl +awk 'NR==1 {t=$$0} NR>1 {t=t "\n" $$0} END { gsub(/$(nl-escape)/,"\n",t); printf t }' +endef + +# escape-for-shell-sq +# +# Usage: embeddable-text = $(call escape-for-shell-sq,text) +# +# This function produces text that is suitable for +# embedding in a shell string that is delimited by +# single-quotes. +# +escape-for-shell-sq = $(subst ','\'',$(1)) + +# shell-sq +# +# Usage: single-quoted-and-escaped-text = $(call shell-sq,text) +# +shell-sq = '$(escape-for-shell-sq)' + +# shell-wordify +# +# Usage: wordified-text = $(call shell-wordify,text) +# +# For instance: +# +# |define text +# |hello +# |world +# |endef +# | +# |target: +# | echo $(call shell-wordify,$(text)) +# +# At least GNU make gets confused by expanding a newline +# within the context of a command line of a makefile rule +# (this is in constrast to a `$(shell ...)' function call, +# which can handle it just fine). +# +# This function avoids the problem by producing a string +# that works as a shell word, regardless of whether or +# not it contains a newline. +# +# If the text to be wordified contains a newline, then +# an intrictate shell command substitution is constructed +# to render the text as a single line; when the shell +# processes the resulting escaped text, it transforms +# it into the original unescaped text. +# +# If the text does not contain a newline, then this function +# produces the same results as the `$(shell-sq)' function. +# +shell-wordify = $(if $(findstring $(newline),$(1)),$(_sw-esc-nl),$(shell-sq)) +define _sw-esc-nl +"$$(echo $(call escape-nl,$(shell-sq),$(2)) | $(call shell-unescape-nl,$(2)))" +endef + +# is-absolute +# +# Usage: bool-value = $(call is-absolute,path) +# +is-absolute = $(shell echo $(shell-sq) | grep ^/ -q && echo y) + +# lookup +# +# Usage: absolute-executable-path-or-empty = $(call lookup,path) +# +# (It's necessary to use `sh -c' because GNU make messes up by +# trying too hard and getting things wrong). +# +lookup = $(call unescape-nl,$(shell sh -c $(_l-sh))) +_l-sh = $(call shell-sq,command -v $(shell-sq) | $(call shell-escape-nl,)) + +# is-executable +# +# Usage: bool-value = $(call is-executable,path) +# +# (It's necessary to use `sh -c' because GNU make messes up by +# trying too hard and getting things wrong). +# +is-executable = $(call _is-executable-helper,$(shell-sq)) +_is-executable-helper = $(shell sh -c $(_is-executable-sh)) +_is-executable-sh = $(call shell-sq,test -f $(1) -a -x $(1) && echo y) + +# get-executable +# +# Usage: absolute-executable-path-or-empty = $(call get-executable,path) +# +# The goal is to get an absolute path for an executable; +# the `command -v' is defined by POSIX, but it's not +# necessarily very portable, so it's only used if +# relative path resolution is requested, as determined +# by the presence of a leading `/'. 
+# +get-executable = $(if $(1),$(if $(is-absolute),$(_ge-abspath),$(lookup))) +_ge-abspath = $(if $(is-executable),$(1)) + +# get-supplied-or-default-executable +# +# Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default) +# +define get-executable-or-default +$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2))) +endef +_ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2))) +_gea_warn = $(warning The path '$(1)' is not executable.) +_gea_err = $(if $(1),$(error Please set '$(1)' appropriately)) diff --git a/tools/perf/feature-tests.mak b/tools/perf/feature-tests.mak index b041ca67a2c..1b3342001e1 100644 --- a/tools/perf/feature-tests.mak +++ b/tools/perf/feature-tests.mak @@ -79,9 +79,15 @@ endef endif ifndef NO_LIBPYTHON +define SOURCE_PYTHON_VERSION +#include +#if PY_VERSION_HEX >= 0x03000000 + #error +#endif +int main(void){} +endef define SOURCE_PYTHON_EMBED #include - int main(void) { Py_Initialize(); -- cgit v1.2.3 From 7fbd065f5a2b299172502f09fc3fbde02b48f591 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Tue, 12 Apr 2011 20:27:59 +0000 Subject: perf tools: Move `try-cc' The `try-cc' user-defined function was in tools/perf/feature-tests.mak; this commit moves it to tools/perf/config/utilities.mak. Signed-off-by: Michael Witten Link: http://lkml.kernel.org/n/tip-bqhwcuxsrve0iodn6q4ejaoi@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/config/utilities.mak | 8 ++++++++ tools/perf/feature-tests.mak | 8 -------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/perf/config/utilities.mak b/tools/perf/config/utilities.mak index 6d8ff887840..8046182a19e 100644 --- a/tools/perf/config/utilities.mak +++ b/tools/perf/config/utilities.mak @@ -178,3 +178,11 @@ endef _ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2))) _gea_warn = $(warning The path '$(1)' is not executable.) 
_gea_err = $(if $(1),$(error Please set '$(1)' appropriately)) + +# try-cc +# Usage: option = $(call try-cc, source-to-build, cc-options) +try-cc = $(shell sh -c \ + 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \ + echo "$(1)" | \ + $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ + rm -f "$$TMP"') diff --git a/tools/perf/feature-tests.mak b/tools/perf/feature-tests.mak index 1b3342001e1..6170fd2531b 100644 --- a/tools/perf/feature-tests.mak +++ b/tools/perf/feature-tests.mak @@ -126,11 +126,3 @@ int main(void) return 0; } endef - -# try-cc -# Usage: option = $(call try-cc, source-to-build, cc-options) -try-cc = $(shell sh -c \ - 'TMP="$(OUTPUT)$(TMPOUT).$$$$"; \ - echo "$(1)" | \ - $(CC) -x c - $(2) -o "$$TMP" > /dev/null 2>&1 && echo y; \ - rm -f "$$TMP"') -- cgit v1.2.3 From f18568aae5612ab37f20e5f383d6154ea69c9dfc Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Tue, 12 Apr 2011 20:30:13 +0000 Subject: perf tools: git mv tools/perf/{features-tests.mak,config/} Signed-off-by: Michael Witten Link: http://lkml.kernel.org/n/tip-a6zhefjayuounko1tk5sjji2@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile | 2 +- tools/perf/config/feature-tests.mak | 128 ++++++++++++++++++++++++++++++++++++ tools/perf/feature-tests.mak | 128 ------------------------------------ 3 files changed, 129 insertions(+), 129 deletions(-) create mode 100644 tools/perf/config/feature-tests.mak delete mode 100644 tools/perf/feature-tests.mak diff --git a/tools/perf/Makefile b/tools/perf/Makefile index b5276c7e9be..91ad5cc95d8 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -138,7 +138,7 @@ INSTALL = install # explicitly what architecture to check for. Fix this up for yours.. SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__ --include feature-tests.mak +-include config/feature-tests.mak ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -fstack-protector-all),y) CFLAGS := $(CFLAGS) -fstack-protector-all diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak new file mode 100644 index 00000000000..6170fd2531b --- /dev/null +++ b/tools/perf/config/feature-tests.mak @@ -0,0 +1,128 @@ +define SOURCE_HELLO +#include +int main(void) +{ + return puts(\"hi\"); +} +endef + +ifndef NO_DWARF +define SOURCE_DWARF +#include +#include +#include +#ifndef _ELFUTILS_PREREQ +#error +#endif + +int main(void) +{ + Dwarf *dbg = dwarf_begin(0, DWARF_C_READ); + return (long)dbg; +} +endef +endif + +define SOURCE_LIBELF +#include + +int main(void) +{ + Elf *elf = elf_begin(0, ELF_C_READ, 0); + return (long)elf; +} +endef + +define SOURCE_GLIBC +#include + +int main(void) +{ + const char *version = gnu_get_libc_version(); + return (long)version; +} +endef + +define SOURCE_ELF_MMAP +#include +int main(void) +{ + Elf *elf = elf_begin(0, ELF_C_READ_MMAP, 0); + return (long)elf; +} +endef + +ifndef NO_NEWT +define SOURCE_NEWT +#include + +int main(void) +{ + newtInit(); + newtCls(); + return newtFinished(); +} +endef +endif + +ifndef NO_LIBPERL +define SOURCE_PERL_EMBED +#include +#include + +int main(void) +{ +perl_alloc(); +return 0; +} +endef +endif + +ifndef NO_LIBPYTHON +define SOURCE_PYTHON_VERSION +#include +#if PY_VERSION_HEX >= 0x03000000 + #error +#endif +int main(void){} +endef +define SOURCE_PYTHON_EMBED +#include +int main(void) +{ + Py_Initialize(); + return 0; +} +endef +endif + +define SOURCE_BFD +#include + +int main(void) +{ + bfd_demangle(0, 0, 0); + return 0; +} +endef + +define SOURCE_CPLUS_DEMANGLE +extern char *cplus_demangle(const char *, int); + +int main(void) +{ 
+ cplus_demangle(0, 0); + return 0; +} +endef + +define SOURCE_STRLCPY +#include +extern size_t strlcpy(char *dest, const char *src, size_t size); + +int main(void) +{ + strlcpy(NULL, NULL, 0); + return 0; +} +endef diff --git a/tools/perf/feature-tests.mak b/tools/perf/feature-tests.mak deleted file mode 100644 index 6170fd2531b..00000000000 --- a/tools/perf/feature-tests.mak +++ /dev/null @@ -1,128 +0,0 @@ -define SOURCE_HELLO -#include -int main(void) -{ - return puts(\"hi\"); -} -endef - -ifndef NO_DWARF -define SOURCE_DWARF -#include -#include -#include -#ifndef _ELFUTILS_PREREQ -#error -#endif - -int main(void) -{ - Dwarf *dbg = dwarf_begin(0, DWARF_C_READ); - return (long)dbg; -} -endef -endif - -define SOURCE_LIBELF -#include - -int main(void) -{ - Elf *elf = elf_begin(0, ELF_C_READ, 0); - return (long)elf; -} -endef - -define SOURCE_GLIBC -#include - -int main(void) -{ - const char *version = gnu_get_libc_version(); - return (long)version; -} -endef - -define SOURCE_ELF_MMAP -#include -int main(void) -{ - Elf *elf = elf_begin(0, ELF_C_READ_MMAP, 0); - return (long)elf; -} -endef - -ifndef NO_NEWT -define SOURCE_NEWT -#include - -int main(void) -{ - newtInit(); - newtCls(); - return newtFinished(); -} -endef -endif - -ifndef NO_LIBPERL -define SOURCE_PERL_EMBED -#include -#include - -int main(void) -{ -perl_alloc(); -return 0; -} -endef -endif - -ifndef NO_LIBPYTHON -define SOURCE_PYTHON_VERSION -#include -#if PY_VERSION_HEX >= 0x03000000 - #error -#endif -int main(void){} -endef -define SOURCE_PYTHON_EMBED -#include -int main(void) -{ - Py_Initialize(); - return 0; -} -endef -endif - -define SOURCE_BFD -#include - -int main(void) -{ - bfd_demangle(0, 0, 0); - return 0; -} -endef - -define SOURCE_CPLUS_DEMANGLE -extern char *cplus_demangle(const char *, int); - -int main(void) -{ - cplus_demangle(0, 0); - return 0; -} -endef - -define SOURCE_STRLCPY -#include -extern size_t strlcpy(char *dest, const char *src, size_t size); - -int main(void) -{ - strlcpy(NULL, NULL, 0); - return 0; -} -endef -- cgit v1.2.3 From 0817a6a3a4fc7c069111083351ca33a78da2a0c9 Mon Sep 17 00:00:00 2001 From: Arun Sharma Date: Thu, 14 Apr 2011 10:38:18 -0700 Subject: perf script: Add support for PERF_TYPE_RAW Useful for getting stack traces for hardware events not handled by PERF_TYPE_HARDWARE. 
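For illustration only (the raw event code below is an arbitrary example, not taken from this patch), a typical workflow with the new type would be:

    ./perf record -e r00c0 -g -- ./some-workload     # raw hardware event, with call chains
    ./perf script -f raw:comm,tid,time,cpu,event,sym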
Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Tom Zanussi Signed-off-by: Arun Sharma Link: http://lkml.kernel.org/n/tip-qimdcdpekjqxuyqovy4kjusx@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 6cf811acc41..b3012c4fff1 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -82,6 +82,16 @@ static struct { PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE, }, + + [PERF_TYPE_RAW] = { + .user_set = false, + + .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID | + PERF_OUTPUT_CPU | PERF_OUTPUT_TIME | + PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM, + + .invalid_fields = PERF_OUTPUT_TRACE, + }, }; static bool output_set_by_user(void) @@ -502,6 +512,8 @@ static int parse_output_fields(const struct option *opt __used, type = PERF_TYPE_SOFTWARE; else if (!strcmp(str, "trace")) type = PERF_TYPE_TRACEPOINT; + else if (!strcmp(str, "raw")) + type = PERF_TYPE_RAW; else { fprintf(stderr, "Invalid event type in field string.\n"); return -EINVAL; @@ -902,7 +914,7 @@ static const struct option options[] = { OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), OPT_CALLBACK('f', "fields", NULL, "str", - "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace. Fields: comm,tid,pid,time,cpu,event,trace,sym", + "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,sym", parse_output_fields), OPT_END() -- cgit v1.2.3 From 9cbdb702092a2d82f909312f4ec3eeded77bb82e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 6 Apr 2011 21:54:20 -0600 Subject: perf script: improve validation of sample attributes for output fields Check for required sample attributes using evsel rather than sample_type in the session header. If the attribute for a default field is not present for the event type (e.g., new command operating on file from older kernel) the field is removed from the output list. Expected event types must exist. For example, if a user specifies -f trace:time,trace -f sw:time,cpu,sym the perf.data file must contain both tracepoints and software events (ie., it is an error if either does not exist in the file). Attribute checking is done once at the beginning of perf-script rather than for each sample. 
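For example (command lines shown for illustration, not part of the patch), with a perf.data file recorded with tracepoints but no software events:

    ./perf script -f trace:time,trace    # accepted: tracepoint samples exist in the file
    ./perf script -f sw:time,cpu,sym     # rejected up front: no software events in the file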
v1 -> v2: - addressed comments from acme Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1302148460-570-1-git-send-email-daahern@cisco.com Signed-off-by: David Ahern Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 111 +++++++++++++++++++++++++++++++++++++------- tools/perf/util/session.c | 12 +++++ tools/perf/util/session.h | 3 ++ 3 files changed, 109 insertions(+), 17 deletions(-) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index b3012c4fff1..974f6d3f4e5 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -51,6 +51,7 @@ struct output_option { /* default set to maintain compatibility with current format */ static struct { bool user_set; + bool wildcard_set; u64 fields; u64 invalid_fields; } output[PERF_TYPE_MAX] = { @@ -104,41 +105,113 @@ static bool output_set_by_user(void) return false; } +static const char *output_field2str(enum perf_output_field field) +{ + int i, imax = ARRAY_SIZE(all_output_options); + const char *str = ""; + + for (i = 0; i < imax; ++i) { + if (all_output_options[i].field == field) { + str = all_output_options[i].str; + break; + } + } + return str; +} + #define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x) -static int perf_session__check_attr(struct perf_session *session, - struct perf_event_attr *attr) +static int perf_event_attr__check_stype(struct perf_event_attr *attr, + u64 sample_type, const char *sample_msg, + enum perf_output_field field) { + int type = attr->type; + const char *evname; + + if (attr->sample_type & sample_type) + return 0; + + if (output[type].user_set) { + evname = __event_name(attr->type, attr->config); + pr_err("Samples for '%s' event do not have %s attribute set. " + "Cannot print '%s' field.\n", + evname, sample_msg, output_field2str(field)); + return -1; + } + + /* user did not ask for it explicitly so remove from the default list */ + output[type].fields &= ~field; + evname = __event_name(attr->type, attr->config); + pr_debug("Samples for '%s' event do not have %s attribute set. 
" + "Skipping '%s' field.\n", + evname, sample_msg, output_field2str(field)); + + return 0; +} + +static int perf_evsel__check_attr(struct perf_evsel *evsel, + struct perf_session *session) +{ + struct perf_event_attr *attr = &evsel->attr; + if (PRINT_FIELD(TRACE) && !perf_session__has_traces(session, "record -R")) return -EINVAL; if (PRINT_FIELD(SYM)) { - if (!(session->sample_type & PERF_SAMPLE_IP)) { - pr_err("Samples do not contain IP data.\n"); + if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP", + PERF_OUTPUT_SYM)) return -EINVAL; - } + if (!no_callchain && - !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) + !(attr->sample_type & PERF_SAMPLE_CALLCHAIN)) symbol_conf.use_callchain = false; } if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) && - !(session->sample_type & PERF_SAMPLE_TID)) { - pr_err("Samples do not contain TID/PID data.\n"); + perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID", + PERF_OUTPUT_TID|PERF_OUTPUT_PID)) return -EINVAL; - } if (PRINT_FIELD(TIME) && - !(session->sample_type & PERF_SAMPLE_TIME)) { - pr_err("Samples do not contain timestamps.\n"); + perf_event_attr__check_stype(attr, PERF_SAMPLE_TIME, "TIME", + PERF_OUTPUT_TIME)) return -EINVAL; - } if (PRINT_FIELD(CPU) && - !(session->sample_type & PERF_SAMPLE_CPU)) { - pr_err("Samples do not contain cpu.\n"); + perf_event_attr__check_stype(attr, PERF_SAMPLE_CPU, "CPU", + PERF_OUTPUT_CPU)) return -EINVAL; + + return 0; +} + +/* + * verify all user requested events exist and the samples + * have the expected data + */ +static int perf_session__check_output_opt(struct perf_session *session) +{ + int j; + struct perf_evsel *evsel; + + for (j = 0; j < PERF_TYPE_MAX; ++j) { + evsel = perf_session__find_first_evtype(session, j); + + /* + * even if fields is set to 0 (ie., show nothing) event must + * exist if user explicitly includes it on the command line + */ + if (!evsel && output[j].user_set && !output[j].wildcard_set) { + pr_err("%s events do not exist. 
" + "Remove corresponding -f option to proceed.\n", + event_type(j)); + return -1; + } + + if (evsel && output[j].fields && + perf_evsel__check_attr(evsel, session)) + return -1; } return 0; @@ -210,9 +283,6 @@ static void process_event(union perf_event *event __unused, if (output[attr->type].fields == 0) return; - if (perf_session__check_attr(session, attr) < 0) - return; - print_sample_start(sample, thread, attr); if (PRINT_FIELD(TRACE)) @@ -525,6 +595,7 @@ static int parse_output_fields(const struct option *opt __used, output[type].fields = 0; output[type].user_set = true; + output[type].wildcard_set = false; } else { tok = str; @@ -541,6 +612,7 @@ static int parse_output_fields(const struct option *opt __used, for (j = 0; j < PERF_TYPE_MAX; ++j) { output[j].fields = 0; output[j].user_set = true; + output[j].wildcard_set = true; } } @@ -1145,6 +1217,11 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) pr_debug("perf script started with script %s\n\n", script_name); } + + err = perf_session__check_output_opt(session); + if (err < 0) + goto out; + err = __cmd_script(session); perf_session__delete(session); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index caa224522fe..fff66741f18 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1156,6 +1156,18 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) return ret; } +struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, + unsigned int type) +{ + struct perf_evsel *pos; + + list_for_each_entry(pos, &session->evlist->entries, node) { + if (pos->attr.type == type) + return pos; + } + return NULL; +} + void perf_session__print_symbols(union perf_event *event, struct perf_sample *sample, struct perf_session *session) diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 1ac481fc110..8daaa2d1539 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -162,6 +162,9 @@ static inline int perf_session__parse_sample(struct perf_session *session, session->sample_id_all, sample); } +struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, + unsigned int type); + void perf_session__print_symbols(union perf_event *event, struct perf_sample *sample, struct perf_session *session); -- cgit v1.2.3 From 103b3934817a7c42fba6e1ef76ecb390a2837d40 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 21 Apr 2011 11:03:20 -0400 Subject: perf, x86: P4 PMU -- Use perf_sample_data_init helper Instead of opencoded assignments better to use perf_sample_data_init helper. Tested-by: Lin Ming Signed-off-by: Cyrill Gorcunov Signed-off-by: Don Zickus Cc: Cyrill Gorcunov Link: http://lkml.kernel.org/r/1303398203-2918-2-git-send-email-dzickus@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_p4.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 8ff882fdb1c..ae31e9698d9 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -912,8 +912,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) int idx, handled = 0; u64 val; - data.addr = 0; - data.raw = NULL; + perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); -- cgit v1.2.3 From fa7b69475a6c192853949ba496dd9c37b497b548 Mon Sep 17 00:00:00 2001 From: "Justin P. 
Mattock" Date: Fri, 22 Apr 2011 10:08:52 -0700 Subject: perf events, x86, P4: Fix typo in comment Signed-off-by: Justin P. Mattock Acked-by: Cyrill Gorcunov Cc: trivial@kernel.org Link: http://lkml.kernel.org/r/1303492132-3004-1-git-send-email-justinmattock@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_p4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index ae31e9698d9..f4c1da2f935 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -1187,7 +1187,7 @@ static __init int p4_pmu_init(void) { unsigned int low, high; - /* If we get stripped -- indexig fails */ + /* If we get stripped -- indexing fails */ BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC); rdmsr(MSR_IA32_MISC_ENABLE, low, high); -- cgit v1.2.3 From 94403f8863d0d1d2005291b2ef0719c2534aa303 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 24 Apr 2011 08:18:31 +0200 Subject: perf events: Add stalled cycles generic event - PERF_COUNT_HW_STALLED_CYCLES The new PERF_COUNT_HW_STALLED_CYCLES event tries to approximate cycles the CPU does nothing useful, because it is stalled on a cache-miss or some other condition. Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-fue11vymwqsoo5to72jxxjyl@git.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel.c | 3 +++ include/linux/perf_event.h | 1 + tools/perf/util/parse-events.c | 1 + tools/perf/util/python.c | 1 + 4 files changed, 6 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 9ae4a2aa739..efa2704c9df 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1413,6 +1413,9 @@ static __init int intel_pmu_init(void) x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; + /* Install the stalled-cycles event: 0xff: All reasons, 0xa2: Resource stalls */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES] = 0xffa2; + if (ebx & 0x40) { /* * Erratum AAJ80 detected, we work it around by using diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index ee9f1e78280..ac636dd20a0 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -52,6 +52,7 @@ enum perf_hw_id { PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES = 7, PERF_COUNT_HW_MAX, /* non-ABI */ }; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 952b4ae3d95..1869e4c646d 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -38,6 +38,7 @@ static struct event_symbol event_symbols[] = { { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, { CHW(BRANCH_MISSES), "branch-misses", "" }, { CHW(BUS_CYCLES), "bus-cycles", "" }, + { CHW(STALLED_CYCLES), "stalled-cycles", "" }, { CSW(CPU_CLOCK), "cpu-clock", "" }, { CSW(TASK_CLOCK), "task-clock", "" }, diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index f5e38451fdc..406f613ee61 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -798,6 +798,7 @@ static struct { { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES }, { "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES }, + { "COUNT_HW_STALLED_CYCLES", 
PERF_COUNT_HW_STALLED_CYCLES }, { "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D }, { "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I }, { "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL }, -- cgit v1.2.3 From 5c543e3c442d6382db127152c7096ca6a55283de Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 12:02:04 +0200 Subject: perf events, x86: Mark constrant tables read mostly Various constraint tables were not marked read-mostly. Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-wpqwwvmhxucy5e718wnamjiv@git.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index efa2704c9df..067a48b13a7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -36,7 +36,7 @@ static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, }; -static struct event_constraint intel_core_event_constraints[] = +static struct event_constraint intel_core_event_constraints[] __read_mostly = { INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ @@ -47,7 +47,7 @@ static struct event_constraint intel_core_event_constraints[] = EVENT_CONSTRAINT_END }; -static struct event_constraint intel_core2_event_constraints[] = +static struct event_constraint intel_core2_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -70,7 +70,7 @@ static struct event_constraint intel_core2_event_constraints[] = EVENT_CONSTRAINT_END }; -static struct event_constraint intel_nehalem_event_constraints[] = +static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -86,19 +86,19 @@ static struct event_constraint intel_nehalem_event_constraints[] = EVENT_CONSTRAINT_END }; -static struct extra_reg intel_nehalem_extra_regs[] = +static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), EVENT_EXTRA_END }; -static struct event_constraint intel_nehalem_percore_constraints[] = +static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly = { INTEL_EVENT_CONSTRAINT(0xb7, 0), EVENT_CONSTRAINT_END }; -static struct event_constraint intel_westmere_event_constraints[] = +static struct event_constraint intel_westmere_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -110,7 +110,7 @@ static struct event_constraint intel_westmere_event_constraints[] = EVENT_CONSTRAINT_END }; -static struct event_constraint intel_snb_event_constraints[] = +static struct event_constraint intel_snb_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -123,21 +123,21 @@ static struct event_constraint intel_snb_event_constraints[] = EVENT_CONSTRAINT_END }; -static struct extra_reg intel_westmere_extra_regs[] = +static struct extra_reg intel_westmere_extra_regs[] __read_mostly = { INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 
0xffff), INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), EVENT_EXTRA_END }; -static struct event_constraint intel_westmere_percore_constraints[] = +static struct event_constraint intel_westmere_percore_constraints[] __read_mostly = { INTEL_EVENT_CONSTRAINT(0xb7, 0), INTEL_EVENT_CONSTRAINT(0xbb, 0), EVENT_CONSTRAINT_END }; -static struct event_constraint intel_gen_event_constraints[] = +static struct event_constraint intel_gen_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ -- cgit v1.2.3 From 11ba2b85f506306c8dfc9fe144aa4ddc43242855 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 24 Apr 2011 15:05:10 +0200 Subject: perf stat: Print stalled cycles percentage Print: 611,527 cycles 400,553 instructions # ( 0.71 instructions per cycle ) 77,809 stalled-cycles # ( 12.71% of all cycles ) 0.000610987 seconds time elapsed Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Signed-off-by: Ingo Molnar Link: http://lkml.kernel.org/n/tip-fd6x8r1cpyb6zhlrc4ix8m45@git.kernel.org --- tools/perf/builtin-stat.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 03f0e45f147..3a29041f857 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -442,7 +442,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total) ratio = avg / total; - fprintf(stderr, " # %10.3f IPC ", ratio); + fprintf(stderr, " # ( %4.2f instructions per cycle )", ratio); } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && runtime_branches_stats[cpu].n != 0) { total = avg_stats(&runtime_branches_stats[cpu]); @@ -450,7 +450,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total) ratio = avg * 100 / total; - fprintf(stderr, " # %10.3f %% ", ratio); + fprintf(stderr, " # %10.3f %%", ratio); } else if (runtime_nsecs_stats[cpu].n != 0) { total = avg_stats(&runtime_nsecs_stats[cpu]); @@ -459,6 +459,13 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) ratio = 1000.0 * avg / total; fprintf(stderr, " # %10.3f M/sec", ratio); + } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES)) { + total = avg_stats(&runtime_cycles_stats[cpu]); + + if (total) + ratio = avg / total * 100.0; + + fprintf(stderr, " # (%5.2f%% of all cycles )", ratio); } } -- cgit v1.2.3 From d58f4c82fed69fdd4a79fa54fe17fd14d98c27aa Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 03:42:18 +0200 Subject: perf stat: Print cache misses as percentage Before: 113,393,041 cache-references # 83.636 M/sec 7,052,454 cache-misses # 5.202 M/sec After: 112,589,441 cache-references # 87.925 M/sec 6,556,354 cache-misses # 5.823 % misses/hits percentages are more expressive than absolute numbers or rates. (Also prettify the CPUs printout line to not have a trailing whitespace.) 
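The new percentage is simply misses relative to references; with the "After" numbers shown above:

    6,556,354 / 112,589,441 * 100 ≈ 5.823 %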
Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-axm28f43x439bl41zkvfzd63@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 3a29041f857..0de3a2002f4 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -157,6 +157,7 @@ static double stddev_stats(struct stats *stats) struct stats runtime_nsecs_stats[MAX_NR_CPUS]; struct stats runtime_cycles_stats[MAX_NR_CPUS]; struct stats runtime_branches_stats[MAX_NR_CPUS]; +struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; struct stats walltime_nsecs_stats; static int create_perf_stat_counter(struct perf_evsel *evsel) @@ -219,10 +220,12 @@ static int read_counter_aggr(struct perf_evsel *counter) */ if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) update_stats(&runtime_nsecs_stats[0], count[0]); - if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) + else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) update_stats(&runtime_cycles_stats[0], count[0]); - if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) + else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) update_stats(&runtime_branches_stats[0], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) + update_stats(&runtime_cacherefs_stats[0], count[0]); return 0; } @@ -404,7 +407,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) return; if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) - fprintf(stderr, " # %10.3f CPUs ", + fprintf(stderr, " # %10.3f CPUs", avg / avg_stats(&walltime_nsecs_stats)); } @@ -452,6 +455,15 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) fprintf(stderr, " # %10.3f %%", ratio); + } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && + runtime_cacherefs_stats[cpu].n != 0) { + total = avg_stats(&runtime_cacherefs_stats[cpu]); + + if (total) + ratio = avg * 100 / total; + + fprintf(stderr, " # %10.3f %%", ratio); + } else if (runtime_nsecs_stats[cpu].n != 0) { total = avg_stats(&runtime_nsecs_stats[cpu]); -- cgit v1.2.3 From b908debd4eef91471016138569f7a9e292be682e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 03:55:40 +0200 Subject: perf tools: Accept case-insensitive symbolic event variants We currently fail on something like '-e CPU-migrations', with: invalid or unsupported event: 'CPU-migrations' While 'CPU-migrations' is how we actually print out the event in the default perf stat output: Performance counter stats for 'true': 0.202204 task-clock-msecs # 0.282 CPUs 0 context-switches # 0.000 M/sec 0 CPU-migrations # 0.000 M/sec So change the matching to be case-insensitive. 
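For illustration (not part of the patch text), both spellings are now accepted and select the same event:

    ./perf stat -e cpu-migrations true
    ./perf stat -e CPU-migrations true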
Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-omcm3edjjtx83a4kh2e244se@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/util/parse-events.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 1869e4c646d..8e54bdb553c 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -649,13 +649,15 @@ static int check_events(const char *str, unsigned int i) int n; n = strlen(event_symbols[i].symbol); - if (!strncmp(str, event_symbols[i].symbol, n)) + if (!strncasecmp(str, event_symbols[i].symbol, n)) return n; n = strlen(event_symbols[i].alias); - if (n) - if (!strncmp(str, event_symbols[i].alias, n)) + if (n) { + if (!strncasecmp(str, event_symbols[i].alias, n)) return n; + } + return 0; } -- cgit v1.2.3 From ceb53fbf6dbb1df26d38379a262c6981fe73dd36 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 04:06:33 +0200 Subject: perf stat: Fail more clearly when an invalid modifier is specified Currently we fail without printing any error message on "perf stat -e task-clock-msecs". The reason is that the task-clock event is matched and the "-msecs" postfix is assumed to be an event modifier - but is not recognized. This patch changes the code to be more informative: $ perf stat -e task-clock-msecs true invalid event modifier: '-msecs' Run 'perf list' for a list of valid events and modifiers And restructures the return value of parse_event_modifier() to allow the printing of all variants of invalid event modifiers. Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-wlaw3dvz1ly6wple8l52cfca@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/util/parse-events.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 8e54bdb553c..b686269427e 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -721,15 +721,19 @@ parse_numeric_event(const char **strp, struct perf_event_attr *attr) return EVT_FAILED; } -static enum event_result +static int parse_event_modifier(const char **strp, struct perf_event_attr *attr) { const char *str = *strp; int exclude = 0; int eu = 0, ek = 0, eh = 0, precise = 0; - if (*str++ != ':') + if (!*str) return 0; + + if (*str++ != ':') + return -1; + while (*str) { if (*str == 'u') { if (!exclude) @@ -750,14 +754,16 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr) ++str; } - if (str >= *strp + 2) { - *strp = str; - attr->exclude_user = eu; - attr->exclude_kernel = ek; - attr->exclude_hv = eh; - attr->precise_ip = precise; - return 1; - } + if (str < *strp + 2) + return -1; + + *strp = str; + + attr->exclude_user = eu; + attr->exclude_kernel = ek; + attr->exclude_hv = eh; + attr->precise_ip = precise; + return 0; } @@ -800,7 +806,12 @@ parse_event_symbols(const struct option *opt, const char **str, return EVT_FAILED; modifier: - parse_event_modifier(str, attr); + if (parse_event_modifier(str, attr) < 0) { + fprintf(stderr, "invalid event modifier: '%s'\n", *str); + fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n"); + + return EVT_FAILED; + } return ret; } -- cgit v1.2.3 From 749141d926faf23ef811686a8050e7cf13dc223f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 04:24:57 +0200 Subject: perf 
stat: Make all displayed event names parseable as well Right now we display this by default: 0.202204 task-clock-msecs # 0.282 CPUs 0 context-switches # 0.000 M/sec 0 CPU-migrations # 0.000 M/sec 85 page-faults # 0.420 M/sec The task-clock-msecs event cannot actually be passed back as an event name, the event name we recognize is 'task-clock'. So change the output of the cpu-clock and task-clock events to be idempotent. ( Units should be printed out in the right-side column, if needed. ) Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-lexrnbzy09asscgd4f7oac4i@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/util/parse-events.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index b686269427e..b5bfef12f39 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -70,8 +70,8 @@ static const char *hw_event_names[] = { }; static const char *sw_event_names[] = { - "cpu-clock-msecs", - "task-clock-msecs", + "cpu-clock", + "task-clock", "page-faults", "context-switches", "CPU-migrations", -- cgit v1.2.3 From dcd9936a5a6d89512b5323c1145647f2dbe0236f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 04:36:37 +0200 Subject: perf stat: Factor our shadow stats Create update_shadow_stats() which is then used in both read_counter_aggr() and read_counter(). This not only simplifies the code but also fixes a bug: HW_CACHE_REFERENCES was not updated in read_counter(). Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-9uc55z3g88r47exde7zxjm6p@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 0de3a2002f4..e5e82f62c78 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -193,6 +193,23 @@ static inline int nsec_counter(struct perf_evsel *evsel) return 0; } +/* + * Update various tracking values we maintain to print + * more semantic information such as miss/hit ratios, + * instruction rates, etc: + */ +static void update_shadow_stats(struct perf_evsel *counter, u64 *count) +{ + if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) + update_stats(&runtime_nsecs_stats[0], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) + update_stats(&runtime_cycles_stats[0], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) + update_stats(&runtime_branches_stats[0], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) + update_stats(&runtime_cacherefs_stats[0], count[0]); +} + /* * Read out the results of a single counter: * aggregate counts across CPUs in system-wide mode @@ -218,14 +235,7 @@ static int read_counter_aggr(struct perf_evsel *counter) /* * Save the full runtime - to allow normalization during printout: */ - if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) - update_stats(&runtime_nsecs_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) - update_stats(&runtime_cycles_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) - update_stats(&runtime_branches_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) - 
update_stats(&runtime_cacherefs_stats[0], count[0]); + update_shadow_stats(counter, count); return 0; } @@ -245,12 +255,7 @@ static int read_counter(struct perf_evsel *counter) count = counter->counts->cpu[cpu].values; - if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) - update_stats(&runtime_nsecs_stats[cpu], count[0]); - if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) - update_stats(&runtime_cycles_stats[cpu], count[0]); - if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) - update_stats(&runtime_branches_stats[cpu], count[0]); + update_shadow_stats(counter, count); } return 0; -- cgit v1.2.3 From 481f988a016f7a0327a5537bde4794349fc4625c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 04:34:16 +0200 Subject: perf stat: Add stalled cycles accounting, prettify the resulting output Add stalled cycles accounting and use it to print the "cycles stalled per instruction" value. Also change the unit of the cycles output from M/sec to GHz - this is more intuitive. Prettify the output to: Performance counter stats for './loop_1b_instructions': 239.775036 task-clock # 0.997 CPUs utilized 761,903,912 cycles # 3.178 GHz 356,620,620 stalled-cycles # 46.81% of all cycles are idle 1,001,578,351 instructions # 1.31 insns per cycle # 0.36 stalled cycles per insn 14,782 cache-references # 0.062 M/sec 5,694 cache-misses # 38.520 % of all cache refs 0.240493656 seconds time elapsed Also adjust the --repeat output to make the percentages align vertically: Performance counter stats for './loop_1b_instructions' (10 runs): 236.096793 task-clock # 0.997 CPUs utilized ( +- 0.011% ) 756,553,086 cycles # 3.204 GHz ( +- 0.002% ) 354,942,692 stalled-cycles # 46.92% of all cycles are idle ( +- 0.008% ) 1,001,389,700 instructions # 1.32 insns per cycle # 0.35 stalled cycles per insn ( +- 0.000% ) 10,166 cache-references # 0.043 M/sec ( +- 0.742% ) 468 cache-misses # 4.608 % of all cache refs ( +- 13.385% ) 0.236874136 seconds time elapsed ( +- 0.01% ) Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-uapziqny39601apdmmhoz7hk@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 43 ++++++++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index e5e82f62c78..e881c206138 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -156,6 +156,7 @@ static double stddev_stats(struct stats *stats) struct stats runtime_nsecs_stats[MAX_NR_CPUS]; struct stats runtime_cycles_stats[MAX_NR_CPUS]; +struct stats runtime_stalled_cycles_stats[MAX_NR_CPUS]; struct stats runtime_branches_stats[MAX_NR_CPUS]; struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; struct stats walltime_nsecs_stats; @@ -204,6 +205,8 @@ static void update_shadow_stats(struct perf_evsel *counter, u64 *count) update_stats(&runtime_nsecs_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) update_stats(&runtime_cycles_stats[0], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES)) + update_stats(&runtime_stalled_cycles_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) update_stats(&runtime_branches_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) @@ -412,8 +415,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) return; if (perf_evsel__match(evsel, 
SOFTWARE, SW_TASK_CLOCK)) - fprintf(stderr, " # %10.3f CPUs", - avg / avg_stats(&walltime_nsecs_stats)); + fprintf(stderr, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats)); } static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) @@ -450,7 +452,15 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total) ratio = avg / total; - fprintf(stderr, " # ( %4.2f instructions per cycle )", ratio); + fprintf(stderr, " # %4.2f insns per cycle", ratio); + + total = avg_stats(&runtime_stalled_cycles_stats[cpu]); + + if (total && avg) { + ratio = total / avg; + fprintf(stderr, "\n # %4.2f stalled cycles per insn", ratio); + } + } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && runtime_branches_stats[cpu].n != 0) { total = avg_stats(&runtime_branches_stats[cpu]); @@ -458,7 +468,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total) ratio = avg * 100 / total; - fprintf(stderr, " # %10.3f %%", ratio); + fprintf(stderr, " # %8.3f %% of all branches", ratio); } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && runtime_cacherefs_stats[cpu].n != 0) { @@ -467,22 +477,29 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total) ratio = avg * 100 / total; - fprintf(stderr, " # %10.3f %%", ratio); + fprintf(stderr, " # %8.3f %% of all cache refs ", ratio); - } else if (runtime_nsecs_stats[cpu].n != 0) { + } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES)) { + total = avg_stats(&runtime_cycles_stats[cpu]); + + if (total) + ratio = avg / total * 100.0; + + fprintf(stderr, " # %5.2f%% of all cycles are idle ", ratio); + } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { total = avg_stats(&runtime_nsecs_stats[cpu]); if (total) - ratio = 1000.0 * avg / total; + ratio = 1.0 * avg / total; - fprintf(stderr, " # %10.3f M/sec", ratio); - } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES)) { - total = avg_stats(&runtime_cycles_stats[cpu]); + fprintf(stderr, " # %8.3f GHz ", ratio); + } else if (runtime_nsecs_stats[cpu].n != 0) { + total = avg_stats(&runtime_nsecs_stats[cpu]); if (total) - ratio = avg / total * 100.0; + ratio = 1000.0 * avg / total; - fprintf(stderr, " # (%5.2f%% of all cycles )", ratio); + fprintf(stderr, " # %8.3f M/sec ", ratio); } } @@ -619,7 +636,7 @@ static void print_stat(int argc, const char **argv) fprintf(stderr, " %18.9f seconds time elapsed", avg_stats(&walltime_nsecs_stats)/1e9); if (run_count > 1) { - fprintf(stderr, " ( +- %7.3f%% )", + fprintf(stderr, " ( +-%5.2f%% )", 100*stddev_stats(&walltime_nsecs_stats) / avg_stats(&walltime_nsecs_stats)); } -- cgit v1.2.3 From 1fc570ad89e55dc32dfa4dda1311948b38f26524 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 05:20:22 +0200 Subject: perf stat: Add stalled cycles to the default output The new default output looks like this: Performance counter stats for './loop_1b_instructions': 236.010686 task-clock # 0.996 CPUs utilized 0 context-switches # 0.000 M/sec 0 CPU-migrations # 0.000 M/sec 99 page-faults # 0.000 M/sec 756,487,646 cycles # 3.205 GHz 354,938,996 stalled-cycles # 46.92% of all cycles are idle 1,001,403,797 instructions # 1.32 insns per cycle # 0.35 stalled cycles per insn 100,279,773 branches # 424.895 M/sec 12,646 branch-misses # 0.013 % of all branches 0.236902540 seconds time elapsed We dropped cache-refs and cache-misses and added stalled-cycles - this is a more generic "how well utilized is the CPU" metric. 
If the stalled-cycles ratio is too high then more specific measurements can be taken to figure out the source of the inefficiency. Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-pbpl2l4mn797s69bclfpwkwn@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 5 ++--- tools/perf/util/parse-events.c | 11 ++++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index e881c206138..924d18c407b 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -65,11 +65,10 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES }, }; @@ -468,7 +467,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total) ratio = avg * 100 / total; - fprintf(stderr, " # %8.3f %% of all branches", ratio); + fprintf(stderr, " # %5.2f %% of all branches ", ratio); } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && runtime_cacherefs_stats[cpu].n != 0) { diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index b5bfef12f39..bbbb735268e 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -32,13 +32,13 @@ char debugfs_path[MAXPATHLEN]; static struct event_symbol event_symbols[] = { { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, + { CHW(STALLED_CYCLES), "stalled-cycles", "idle-cycles" }, { CHW(INSTRUCTIONS), "instructions", "" }, { CHW(CACHE_REFERENCES), "cache-references", "" }, { CHW(CACHE_MISSES), "cache-misses", "" }, { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, { CHW(BRANCH_MISSES), "branch-misses", "" }, { CHW(BUS_CYCLES), "bus-cycles", "" }, - { CHW(STALLED_CYCLES), "stalled-cycles", "" }, { CSW(CPU_CLOCK), "cpu-clock", "" }, { CSW(TASK_CLOCK), "task-clock", "" }, @@ -54,9 +54,9 @@ static struct event_symbol event_symbols[] = { #define __PERF_EVENT_FIELD(config, name) \ ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) -#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) +#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) -#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) +#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) static const char *hw_event_names[] = { @@ -67,6 +67,7 @@ static const char *hw_event_names[] = { "branches", "branch-misses", "bus-cycles", + "stalled-cycles", }; static const char *sw_event_names[] = { @@ -308,7 +309,7 @@ const char *__event_name(int type, u64 config) switch (type) { case PERF_TYPE_HARDWARE: - if (config < PERF_COUNT_HW_MAX) + if (config < PERF_COUNT_HW_MAX && hw_event_names[config]) return hw_event_names[config]; return "unknown-hardware"; @@ -334,7 +335,7 @@ const char *__event_name(int type, u64 config) } case PERF_TYPE_SOFTWARE: - 
if (config < PERF_COUNT_SW_MAX) + if (config < PERF_COUNT_SW_MAX && sw_event_names[config]) return sw_event_names[config]; return "unknown-software"; -- cgit v1.2.3 From f99844cb76b7d347711c22cdcb94266b7214141f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 05:35:39 +0200 Subject: perf stat: Fix -nan% output in perf stat noise printouts Before: 0 CPU-migrations # 0.000 M/sec ( +- -nan% ) After: 0 CPU-migrations # 0.000 M/sec ( +- 0.00% ) Also factor out the noise printing function. Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-z89h2v1bk1mikcbsf7e6v34q@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 924d18c407b..845ded8f15a 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -382,6 +382,16 @@ static int run_perf_stat(int argc __used, const char **argv) return WEXITSTATUS(status); } +static void print_noise_pct(double total, double avg) +{ + double pct = 0.0; + + if (avg) + pct = 100.0*total/avg; + + fprintf(stderr, " ( +-%6.2f%% )", pct); +} + static void print_noise(struct perf_evsel *evsel, double avg) { struct perf_stat *ps; @@ -390,8 +400,7 @@ static void print_noise(struct perf_evsel *evsel, double avg) return; ps = evsel->priv; - fprintf(stderr, " ( +- %7.3f%% )", - 100 * stddev_stats(&ps->res_stats[0]) / avg); + print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); } static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) @@ -635,9 +644,8 @@ static void print_stat(int argc, const char **argv) fprintf(stderr, " %18.9f seconds time elapsed", avg_stats(&walltime_nsecs_stats)/1e9); if (run_count > 1) { - fprintf(stderr, " ( +-%5.2f%% )", - 100*stddev_stats(&walltime_nsecs_stats) / - avg_stats(&walltime_nsecs_stats)); + print_noise_pct(stddev_stats(&walltime_nsecs_stats), + avg_stats(&walltime_nsecs_stats)); } fprintf(stderr, "\n\n"); } -- cgit v1.2.3 From a5d243d04a150acbfa79d641154f49e5d920f64f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 05:39:24 +0200 Subject: perf stat: Print stalled cycles warning colors Print the stalled-cycles percentage with different warning level ASCII colors, as the percentage passes the 25%/50%/75% thresholds. 
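[ Editor's note: the 25%/50%/75% banding described above can be tried out in isolation. The sketch below is not perf code - it uses a hypothetical local enum in place of perf's PERF_COLOR_* constants and only illustrates how a stall ratio maps to a warning level. ]

#include <stdio.h>

/* hypothetical stand-ins for perf's PERF_COLOR_* constants */
enum warn_color { WARN_NORMAL, WARN_YELLOW, WARN_MAGENTA, WARN_RED };

static enum warn_color stall_color(double ratio)
{
	if (ratio > 75.0)
		return WARN_RED;
	if (ratio > 50.0)
		return WARN_MAGENTA;
	if (ratio > 25.0)
		return WARN_YELLOW;
	return WARN_NORMAL;
}

int main(void)
{
	double samples[] = { 10.0, 30.0, 60.0, 80.0 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%6.2f%% idle -> warning level %d\n",
		       samples[i], stall_color(samples[i]));
	return 0;
}
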
Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-e25zz44rcms7mu9az4fu5zp0@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 845ded8f15a..e7d91f9cf78 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -46,6 +46,7 @@ #include "util/evlist.h" #include "util/evsel.h" #include "util/debug.h" +#include "util/color.h" #include "util/header.h" #include "util/cpumap.h" #include "util/thread.h" @@ -426,6 +427,29 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) fprintf(stderr, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats)); } +static void print_stalled_cycles(int cpu, struct perf_evsel *evsel __used, double avg) +{ + double total, ratio = 0.0; + const char *color; + + total = avg_stats(&runtime_cycles_stats[cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = PERF_COLOR_NORMAL; + if (ratio > 75.0) + color = PERF_COLOR_RED; + else if (ratio > 50.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 25.0) + color = PERF_COLOR_YELLOW; + + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%5.2f%%", ratio); + fprintf(stderr, " of all cycles are idle "); +} + static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) { double total, ratio = 0.0; @@ -488,12 +512,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) fprintf(stderr, " # %8.3f %% of all cache refs ", ratio); } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES)) { - total = avg_stats(&runtime_cycles_stats[cpu]); - - if (total) - ratio = avg / total * 100.0; - - fprintf(stderr, " # %5.2f%% of all cycles are idle ", ratio); + print_stalled_cycles(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { total = avg_stats(&runtime_nsecs_stats[cpu]); @@ -508,6 +527,8 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) ratio = 1000.0 * avg / total; fprintf(stderr, " # %8.3f M/sec ", ratio); + } else { + fprintf(stderr, " "); } } -- cgit v1.2.3 From c78df6c1d49b5d798f1579141e3a12be7c325d1e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 12:16:10 +0200 Subject: perf stat: Print branch misses warning colors Print the missed-branches percentage with different warning level ASCII colors, as the percentage passes the 5%/10%/20% thresholds. These thresholds are set to relatively low levels, because on most CPUs even a moderate percentage of branch-misses already shows up as a slowdown. 
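[ Editor's note: the per-counter math behind the percentage is simply avg * 100 / total, as in the hunk that follows. A minimal standalone sketch, fed with the branch counts from the sample default output quoted earlier in this log, shows why such a run stays well below the 5% yellow band. ]

#include <stdio.h>

int main(void)
{
	/* counts taken from the sample 'perf stat' output earlier in this log */
	double branch_misses = 12646.0;
	double branches      = 100279773.0;
	double ratio         = branches ? branch_misses * 100.0 / branches : 0.0;

	/* 5%/10%/20% are the warning thresholds introduced above */
	printf("%6.2f%% of all branches missed (yellow above 5%%)\n", ratio);
	return 0;
}
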
Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-ybqukg7p86leiup7gl03ecgk@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index e7d91f9cf78..5d4e1b9b2d8 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -450,6 +450,29 @@ static void print_stalled_cycles(int cpu, struct perf_evsel *evsel __used, doubl fprintf(stderr, " of all cycles are idle "); } +static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) +{ + double total, ratio = 0.0; + const char *color; + + total = avg_stats(&runtime_branches_stats[cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; + + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%5.2f%%", ratio); + fprintf(stderr, " of all branches "); +} + static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) { double total, ratio = 0.0; @@ -495,13 +518,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && runtime_branches_stats[cpu].n != 0) { - total = avg_stats(&runtime_branches_stats[cpu]); - - if (total) - ratio = avg * 100 / total; - - fprintf(stderr, " # %5.2f %% of all branches ", ratio); - + print_branch_misses(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && runtime_cacherefs_stats[cpu].n != 0) { total = avg_stats(&runtime_cacherefs_stats[cpu]); -- cgit v1.2.3 From 8bb6c79f24e66538f606076915e918242c02ec7c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 13:25:24 +0200 Subject: perf stat: Print out miss/hit ratio for L1 data-cache events Print out this kind of l1-dcache-misses percentage: Performance counter stats for './bw_tcp localhost': 29,956,262,201 cycles # 3.002 GHz (scaled from 85.14%) 8,255,209,558 stalled-cycles # 27.56% of all cycles are idle (scaled from 86.56%) 1,206,130,308 l1-dcache-misses # 40.49% of all L1-dcache hits (scaled from 86.30%) 2,978,756,779 l1-dcache-refs # 298.512 M/sec (scaled from 70.02%) 8,861,956,159 instructions # 0.30 insns per cycle # 0.93 stalled cycles per insn (scaled from 84.27%) 1,644,306,068 branches # 164.782 M/sec (scaled from 86.43%) 74,778,443 branch-misses # 4.55% of all branches (scaled from 70.69%) 9978.695711 task-clock # 0.693 CPUs utilized 14.404347983 seconds time elapsed And color the result depending on the severity of cache-trashing. 
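[ Editor's note: the event matched here is the standard PERF_TYPE_HW_CACHE triple encoding - cache id in the low byte, operation in the next byte, result in the third - the same encoding visible in the condition added to abs_printout() below. A small sketch of building such an attribute, assuming only the UAPI definitions from linux/perf_event.h: ]

#include <stdio.h>
#include <string.h>
#include <linux/perf_event.h>

/* Build an attr for L1D read misses using the generic cache encoding */
static void setup_l1d_read_miss(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type   = PERF_TYPE_HW_CACHE;
	attr->size   = sizeof(*attr);
	attr->config = PERF_COUNT_HW_CACHE_L1D |
		       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
}

int main(void)
{
	struct perf_event_attr attr;

	setup_l1d_read_miss(&attr);
	printf("config = 0x%llx\n", (unsigned long long)attr.config);
	return 0;
}
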
Acked-by: Peter Zijlstra Acked-by Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-54gmz0zymaid84zcs7joq02p@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 5d4e1b9b2d8..03bac6aa014 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -159,6 +159,7 @@ struct stats runtime_cycles_stats[MAX_NR_CPUS]; struct stats runtime_stalled_cycles_stats[MAX_NR_CPUS]; struct stats runtime_branches_stats[MAX_NR_CPUS]; struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; +struct stats runtime_l1_dcache_stats[MAX_NR_CPUS]; struct stats walltime_nsecs_stats; static int create_perf_stat_counter(struct perf_evsel *evsel) @@ -211,6 +212,8 @@ static void update_shadow_stats(struct perf_evsel *counter, u64 *count) update_stats(&runtime_branches_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) update_stats(&runtime_cacherefs_stats[0], count[0]); + else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) + update_stats(&runtime_l1_dcache_stats[0], count[0]); } /* @@ -473,6 +476,29 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double fprintf(stderr, " of all branches "); } +static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +{ + double total, ratio = 0.0; + const char *color; + + total = avg_stats(&runtime_l1_dcache_stats[cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; + + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%5.2f%%", ratio); + fprintf(stderr, " of all L1-dcache hits "); +} + static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) { double total, ratio = 0.0; @@ -519,6 +545,13 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && runtime_branches_stats[cpu].n != 0) { print_branch_misses(cpu, evsel, avg); + } else if ( + evsel->attr.type == PERF_TYPE_HW_CACHE && + evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D | + ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | + ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && + runtime_branches_stats[cpu].n != 0) { + print_l1_dcache_misses(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && runtime_cacherefs_stats[cpu].n != 0) { total = avg_stats(&runtime_cacherefs_stats[cpu]); -- cgit v1.2.3 From c6264deff7ea6125492b442edad885e5429679af Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 27 Apr 2011 13:50:47 +0200 Subject: perf stat: Add -d/--detailed flag to run with a lot of events Add the new -d/--detailed flag, which generates a pretty detailed event list: Performance counter stats for './hackbench 10' (10 runs): 1514.287888 task-clock # 10.897 CPUs utilized ( +- 3.05% ) 39,698 context-switches # 0.026 M/sec ( +- 12.19% ) 8,147 CPU-migrations # 0.005 M/sec ( +- 16.55% ) 17,918 page-faults # 0.012 M/sec ( +- 0.37% ) 2,944,504,050 cycles # 1.944 GHz ( +- 3.89% ) (32.60%) 1,043,971,283 stalled-cycles # 35.45% of all cycles are idle ( +- 5.22% ) (44.48%) 1,655,906,768 instructions # 0.56 insns per cycle # 0.63 stalled cycles per insn ( +- 1.95% ) (55.09%) 338,832,373 branches # 223.757 M/sec ( +- 1.96% ) (64.47%) 
3,892,416 branch-misses # 1.15% of all branches ( +- 5.49% ) (73.12%) 606,410,482 L1-dcache-loads # 400.459 M/sec ( +- 1.29% ) (71.21%) 31,204,395 L1-dcache-load-misses # 5.15% of all L1-dcache hits ( +- 3.04% ) (60.43%) 3,922,751 LLC-loads # 2.590 M/sec ( +- 6.80% ) (46.87%) 5,037,288 LLC-load-misses # 3.327 M/sec ( +- 3.56% ) (13.00%) 0.138966828 seconds time elapsed ( +- 4.11% ) This can be used "at a glance" for narrower analysis. -d can also be used in addition to other -e events, to further expand an event list. Acked-by: Peter Zijlstra Acked-by: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-cxs98quixs3qyvdqx3goojc4@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 68 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 60 insertions(+), 8 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 03bac6aa014..6959fdecb20 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -73,6 +73,47 @@ static struct perf_event_attr default_attrs[] = { }; +/* + * Detailed stats: + */ +static struct perf_event_attr detailed_attrs[] = { + + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, + { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, + + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_L1D << 0 | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_L1D << 0 | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_LL << 0 | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_LL << 0 | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, +}; + struct perf_evlist *evsel_list; static bool system_wide = false; @@ -86,6 +127,7 @@ static pid_t target_pid = -1; static pid_t target_tid = -1; static pid_t child_pid = -1; static bool null_run = false; +static bool detailed_run = false; static bool big_num = true; static int big_num_opt = -1; static const char *cpu_list; @@ -550,7 +592,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D | ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && - runtime_branches_stats[cpu].n != 0) { + runtime_l1_dcache_stats[cpu].n != 0) { print_l1_dcache_misses(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && runtime_cacherefs_stats[cpu].n != 0) { @@ -625,8 +667,7 @@ static void print_counter_aggr(struct perf_evsel *counter) avg_enabled = avg_stats(&ps->res_stats[1]); avg_running = avg_stats(&ps->res_stats[2]); - fprintf(stderr, " (scaled from %.2f%%)", - 100 * avg_running / avg_enabled); + fprintf(stderr, " (%.2f%%)", 100 * 
avg_running / avg_enabled); } fprintf(stderr, "\n"); } @@ -668,10 +709,8 @@ static void print_counter(struct perf_evsel *counter) if (!csv_output) { print_noise(counter, 1.0); - if (run != ena) { - fprintf(stderr, " (scaled from %.2f%%)", - 100.0 * run / ena); - } + if (run != ena) + fprintf(stderr, " (%.2f%%)", 100.0 * run / ena); } fputc('\n', stderr); } @@ -778,6 +817,8 @@ static const struct option options[] = { "repeat command and print average + stddev (max: 100)"), OPT_BOOLEAN('n', "null", &null_run, "null run - dont start any counters"), + OPT_BOOLEAN('d', "detailed", &detailed_run, + "detailed run - start a lot of events"), OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, "print large numbers with thousands\' separators", stat__set_big_num), @@ -839,7 +880,18 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) } /* Set attrs and nr_counters if no event is selected and !null_run */ - if (!null_run && !evsel_list->nr_entries) { + if (detailed_run) { + size_t c; + + for (c = 0; c < ARRAY_SIZE(detailed_attrs); ++c) { + pos = perf_evsel__new(&detailed_attrs[c], c); + if (pos == NULL) + goto out; + perf_evlist__add(evsel_list, pos); + } + } + /* Set attrs and nr_counters if no event is selected and !null_run */ + if (!detailed_run && !null_run && !evsel_list->nr_entries) { size_t c; for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) { -- cgit v1.2.3 From 9ceb1c3d1fe15c2f9b55eaa8978019ef0e0a06ac Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 28 Apr 2011 02:57:53 +0200 Subject: perf stat: Fix printout vertical alignment Before: | | Performance counter stats for '/home/mingo/hackbench 20' (5 runs): | | 71,321,607 instructions:u # 0.42 insns per cycle ( +- 0.00% ) | 168,040,009 cycles:u # 0.000 GHz ( +- 0.81% ) | | 1.468002368 seconds time elapsed ( +- 1.33% ) | After: | | Performance counter stats for '/home/mingo/hackbench 20' (5 runs): | | 71,321,607 instructions:u # 0.42 insns per cycle ( +- 0.00% ) | 168,040,009 cycles:u # 0.000 GHz ( +- 0.81% ) | | 1.468002368 seconds time elapsed ( +- 1.33% ) | The last column (stddev noise) is properly aligned, vertically. Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n1eqio7hjpn0dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 6959fdecb20..003caa857a4 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -575,7 +575,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total) ratio = avg / total; - fprintf(stderr, " # %4.2f insns per cycle", ratio); + fprintf(stderr, " # %4.2f insns per cycle ", ratio); total = avg_stats(&runtime_stalled_cycles_stats[cpu]); -- cgit v1.2.3 From 8a850cadca0e387c87a0911a61e99fd66aeb57ec Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 28 Apr 2011 11:16:44 +0200 Subject: perf event, x86: Use better stalled cycles metric Use the UOPS_EXECUTED.*,c=1,i=1 event on Intel CPUs - it is a rather good indicator of CPU execution stalls, more sensitive and more inclusive than the 0xa2 resource stalls event (which does not count nearly as many stall types). 
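[ Editor's note: for readers decoding the magic number - assuming the usual x86 PERFEVTSELx layout (event select in bits 0-7, unit mask in bits 8-15, invert in bit 23, counter mask in bits 24-31), 0x1803fb1 breaks down as event 0xb1, umask 0x3f, inv=1, cmask=1, i.e. the UOPS_EXECUTED variant named above. A throwaway decode sketch: ]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t config = 0x1803fb1;

	printf("event  : 0x%02llx\n", (unsigned long long)(config & 0xff));        /* 0xb1: UOPS_EXECUTED */
	printf("umask  : 0x%02llx\n", (unsigned long long)((config >> 8) & 0xff)); /* 0x3f */
	printf("invert : %llu\n", (unsigned long long)((config >> 23) & 1));       /* i=1 */
	printf("cmask  : %llu\n", (unsigned long long)((config >> 24) & 0xff));    /* c=1 */
	return 0;
}
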
Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n1eqio7hjpn2dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 067a48b13a7..1ea94224f62 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1413,8 +1413,8 @@ static __init int intel_pmu_init(void) x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; - /* Install the stalled-cycles event: 0xff: All reasons, 0xa2: Resource stalls */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES] = 0xffa2; + /* Install the stalled-cycles event: UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES] = 0x1803fb1; if (ebx & 0x40) { /* -- cgit v1.2.3 From f9cef0a90c4e7637f1ec98474a1a099aec45eb65 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 28 Apr 2011 18:17:11 +0200 Subject: perf stat: Add --sync/-S option --sync will tell perf stat to run sync() before starting a command. This allows IO-heavy tests to be used with --repeat, without one iteration impacting the other. Elapsed time will stabilize for example: before: 3.971525714 seconds time elapsed ( +- 8.56% ) after: 3.211098537 seconds time elapsed ( +- 1.52% ) So measurements will be more accurate. Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n1eqio7hjpn1dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 003caa857a4..5658a770dbd 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -128,6 +128,7 @@ static pid_t target_tid = -1; static pid_t child_pid = -1; static bool null_run = false; static bool detailed_run = false; +static bool sync_run = false; static bool big_num = true; static int big_num_opt = -1; static const char *cpu_list; @@ -819,6 +820,8 @@ static const struct option options[] = { "null run - dont start any counters"), OPT_BOOLEAN('d', "detailed", &detailed_run, "detailed run - start a lot of events"), + OPT_BOOLEAN('S', "sync", &sync_run, + "call sync() before starting a run"), OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, "print large numbers with thousands\' separators", stat__set_big_num), @@ -944,6 +947,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) for (run_idx = 0; run_idx < run_count; run_idx++) { if (run_count != 1 && verbose) fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1); + + if (sync_run) + sync(); + status = run_perf_stat(argc, argv); } -- cgit v1.2.3 From ede70290046043b2638204cab55e26ea1d0c6cd9 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 28 Apr 2011 08:48:42 +0200 Subject: perf stat: Fix compatibility behavior Instead of failing on an unknown event, when new perf stat is run on older kernels: $ ./perf stat true Error: open_counter returned with 22 (Invalid argument). /bin/dmesg may provide additional information. Fatal: Not all events could be opened. 
Just ignore EINVAL and ENOSYS, we'll print the results as not counted: Performance counter stats for 'true': 0.239483 task-clock # 0.493 CPUs utilized 0 context-switches # 0.000 M/sec 0 CPU-migrations # 0.000 M/sec 86 page-faults # 0.359 M/sec 704,766 cycles # 2.943 GHz stalled-cycles 381,961 instructions # 0.54 insns per cycle 69,626 branches # 290.735 M/sec 4,594 branch-misses # 6.60% of all branches 0.000485883 seconds time elapsed Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n1eqio5hjpn3dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 5658a770dbd..da77077450c 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -372,7 +372,10 @@ static int run_perf_stat(int argc __used, const char **argv) list_for_each_entry(counter, &evsel_list->entries, node) { if (create_perf_stat_counter(counter) < 0) { - if (errno == -EPERM || errno == -EACCES) { + if (errno == EINVAL || errno == ENOSYS) + continue; + + if (errno == EPERM || errno == EACCES) { error("You may not have permission to collect %sstats.\n" "\t Consider tweaking" " /proc/sys/kernel/perf_event_paranoid or running as root.", -- cgit v1.2.3 From 8f62242246351b5a4bc0c1f00c0c7003edea128a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 Apr 2011 13:19:47 +0200 Subject: perf events: Add generic front-end and back-end stalled cycle event definitions Add two generic hardware events: front-end and back-end stalled cycles. These events measure conditions when the CPU is executing code but its capabilities are not fully utilized. Understanding such situations and analyzing them is an important sub-task of code optimization workflows. Both events limit performance: most front end stalls tend to be caused by branch misprediction or instruction fetch cachemisses, backend stalls can be caused by various resource shortages or inefficient instruction scheduling. Front-end stalls are the more important ones: code cannot run fast if the instruction stream is not being kept up. An over-utilized back-end can cause front-end stalls and thus has to be kept an eye on as well. The exact composition is very program logic and instruction mix dependent. We use the terms 'stall', 'front-end' and 'back-end' loosely and try to use the best available events from specific CPUs that approximate these concepts. 
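[ Editor's note: once these generic IDs exist, a profiler can request them like any other hardware event. A hedged sketch, not part of this patch set, counting front-end stalls for a stretch of code via the raw syscall (glibc provides no perf_event_open() wrapper): ]

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <asm/unistd.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type     = PERF_TYPE_HARDWARE;
	attr.size     = sizeof(attr);
	attr.config   = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND;
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");		/* older kernels reject the new ID */
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* ... workload under measurement ... */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("stalled-cycles-frontend: %lld\n", count);
	close(fd);
	return 0;
}
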
Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n000io7hjpn1dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel.c | 2 +- include/linux/perf_event.h | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 1ea94224f62..393085b87a2 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1414,7 +1414,7 @@ static __init int intel_pmu_init(void) x86_pmu.extra_regs = intel_nehalem_extra_regs; /* Install the stalled-cycles event: UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ - intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES] = 0x1803fb1; + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; if (ebx & 0x40) { /* diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index ac636dd20a0..4e2d7ae7149 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -52,7 +52,8 @@ enum perf_hw_id { PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, PERF_COUNT_HW_BRANCH_MISSES = 5, PERF_COUNT_HW_BUS_CYCLES = 6, - PERF_COUNT_HW_STALLED_CYCLES = 7, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, PERF_COUNT_HW_MAX, /* non-ABI */ }; -- cgit v1.2.3 From 91fc4cc00099986bc1ba50e1f421c3548cffae42 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 Apr 2011 14:17:19 +0200 Subject: perf, x86: Add new stalled cycles events for Intel and AMD CPUs Extend the Intel and AMD event definitions with generic front-end and back-end stall events. ( These are only approximations - suggestions are welcome for better events. ) Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n001io7hjpn1dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_amd.c | 14 ++++++++------ arch/x86/kernel/cpu/perf_event_intel.c | 4 +++- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index cf4e369cea6..fe29c1d2219 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -96,12 +96,14 @@ static __initconst const u64 amd_hw_cache_event_ids */ static const u64 amd_perfmon_event_map[] = { - [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, - [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, - [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, - [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, + [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ }; static u64 amd_pmu_event_map(int hw_event) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 393085b87a2..7983b9a9533 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1413,7 +1413,9 @@ static __init int intel_pmu_init(void) x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; - /* Install the stalled-cycles event: 
UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ + /* UOPS_ISSUED.STALLED_CYCLES */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; + /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; if (ebx & 0x40) { -- cgit v1.2.3 From 129c04cb8ce2e4bf3f17223f58ef16aa8a2cb3b8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 Apr 2011 14:41:28 +0200 Subject: perf tools: Add front-end and back-end stalled cycles support Update perf tooling to deal with front-end and back-end stalled cycles events. Add both the default 'perf stat' output. Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n002io7hjpn1dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 10 ++++++---- tools/perf/util/parse-events.c | 37 +++++++++++++++++++------------------ tools/perf/util/python.c | 4 +++- 3 files changed, 28 insertions(+), 23 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index da77077450c..6a4a8a399d9 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -66,7 +66,8 @@ static struct perf_event_attr default_attrs[] = { { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, @@ -84,7 +85,8 @@ static struct perf_event_attr detailed_attrs[] = { { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, + { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, @@ -249,7 +251,7 @@ static void update_shadow_stats(struct perf_evsel *counter, u64 *count) update_stats(&runtime_nsecs_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) update_stats(&runtime_cycles_stats[0], count[0]); - else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES)) + else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) update_stats(&runtime_stalled_cycles_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) update_stats(&runtime_branches_stats[0], count[0]); @@ -607,7 +609,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) fprintf(stderr, " # %8.3f %% of all cache refs ", ratio); - } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES)) { + } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { print_stalled_cycles(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { total = avg_stats(&runtime_nsecs_stats[cpu]); diff --git 
a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index bbbb735268e..04d2f0a9667 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -31,24 +31,25 @@ char debugfs_path[MAXPATHLEN]; #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x static struct event_symbol event_symbols[] = { - { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, - { CHW(STALLED_CYCLES), "stalled-cycles", "idle-cycles" }, - { CHW(INSTRUCTIONS), "instructions", "" }, - { CHW(CACHE_REFERENCES), "cache-references", "" }, - { CHW(CACHE_MISSES), "cache-misses", "" }, - { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, - { CHW(BRANCH_MISSES), "branch-misses", "" }, - { CHW(BUS_CYCLES), "bus-cycles", "" }, - - { CSW(CPU_CLOCK), "cpu-clock", "" }, - { CSW(TASK_CLOCK), "task-clock", "" }, - { CSW(PAGE_FAULTS), "page-faults", "faults" }, - { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, - { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, - { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, - { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, - { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, - { CSW(EMULATION_FAULTS), "emulation-faults", "" }, + { CHW(CPU_CYCLES), "cpu-cycles", "cycles" }, + { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" }, + { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" }, + { CHW(INSTRUCTIONS), "instructions", "" }, + { CHW(CACHE_REFERENCES), "cache-references", "" }, + { CHW(CACHE_MISSES), "cache-misses", "" }, + { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" }, + { CHW(BRANCH_MISSES), "branch-misses", "" }, + { CHW(BUS_CYCLES), "bus-cycles", "" }, + + { CSW(CPU_CLOCK), "cpu-clock", "" }, + { CSW(TASK_CLOCK), "task-clock", "" }, + { CSW(PAGE_FAULTS), "page-faults", "faults" }, + { CSW(PAGE_FAULTS_MIN), "minor-faults", "" }, + { CSW(PAGE_FAULTS_MAJ), "major-faults", "" }, + { CSW(CONTEXT_SWITCHES), "context-switches", "cs" }, + { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, + { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" }, + { CSW(EMULATION_FAULTS), "emulation-faults", "" }, }; #define __PERF_EVENT_FIELD(config, name) \ diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 406f613ee61..8b0eff8b828 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -798,7 +798,6 @@ static struct { { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES }, { "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES }, - { "COUNT_HW_STALLED_CYCLES", PERF_COUNT_HW_STALLED_CYCLES }, { "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D }, { "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I }, { "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL }, @@ -811,6 +810,9 @@ static struct { { "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, + { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, + { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, + { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, -- cgit v1.2.3 From d3d1e86da07b4565815e3dbcd082f53017d215f8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 Apr 2011 13:49:08 +0200 Subject: perf stat: Analyze front-end and back-end stall counts Sample output: Performance counter stats for 
'./loop_1b': 873.691065 task-clock # 1.000 CPUs utilized 1 context-switches # 0.000 M/sec 1 CPU-migrations # 0.000 M/sec 96 page-faults # 0.000 M/sec 2,012,637,222 cycles # 2.304 GHz (66.58%) 1,001,397,911 stalled-cycles-frontend # 49.76% frontend cycles idle (66.58%) 7,523,398 stalled-cycles-backend # 0.37% backend cycles idle (66.76%) 2,004,551,046 instructions # 1.00 insns per cycle # 0.50 stalled cycles per insn (66.80%) 1,001,304,992 branches # 1146.063 M/sec (66.76%) 39,453 branch-misses # 0.00% of all branches (66.64%) 0.874046121 seconds time elapsed Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n003io7hjpn1dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 41 +++++++++++++++++++++++++++++++++++------ tools/perf/util/parse-events.c | 7 ++++--- 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 6a4a8a399d9..e45449938b8 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -201,7 +201,8 @@ static double stddev_stats(struct stats *stats) struct stats runtime_nsecs_stats[MAX_NR_CPUS]; struct stats runtime_cycles_stats[MAX_NR_CPUS]; -struct stats runtime_stalled_cycles_stats[MAX_NR_CPUS]; +struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS]; +struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS]; struct stats runtime_branches_stats[MAX_NR_CPUS]; struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; struct stats runtime_l1_dcache_stats[MAX_NR_CPUS]; @@ -251,8 +252,10 @@ static void update_shadow_stats(struct perf_evsel *counter, u64 *count) update_stats(&runtime_nsecs_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) update_stats(&runtime_cycles_stats[0], count[0]); + else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) + update_stats(&runtime_stalled_cycles_front_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) - update_stats(&runtime_stalled_cycles_stats[0], count[0]); + update_stats(&runtime_stalled_cycles_back_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) update_stats(&runtime_branches_stats[0], count[0]); else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) @@ -478,7 +481,30 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) fprintf(stderr, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats)); } -static void print_stalled_cycles(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg) +{ + double total, ratio = 0.0; + const char *color; + + total = avg_stats(&runtime_cycles_stats[cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = PERF_COLOR_NORMAL; + if (ratio > 75.0) + color = PERF_COLOR_RED; + else if (ratio > 50.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 20.0) + color = PERF_COLOR_YELLOW; + + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%5.2f%%", ratio); + fprintf(stderr, " frontend cycles idle "); +} + +static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg) { double total, ratio = 0.0; const char *color; @@ -498,7 +524,7 @@ static void print_stalled_cycles(int cpu, struct perf_evsel *evsel __used, doubl fprintf(stderr, " # "); color_fprintf(stderr, color, "%5.2f%%", ratio); - fprintf(stderr, " of all cycles are idle "); 
+ fprintf(stderr, " backend cycles idle "); } static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) @@ -583,7 +609,8 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) fprintf(stderr, " # %4.2f insns per cycle ", ratio); - total = avg_stats(&runtime_stalled_cycles_stats[cpu]); + total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]); + total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu])); if (total && avg) { ratio = total / avg; @@ -609,8 +636,10 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) fprintf(stderr, " # %8.3f %% of all cache refs ", ratio); + } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) { + print_stalled_cycles_frontend(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) { - print_stalled_cycles(cpu, evsel, avg); + print_stalled_cycles_backend(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) { total = avg_stats(&runtime_nsecs_stats[cpu]); diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 04d2f0a9667..8a407f3e286 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -60,7 +60,7 @@ static struct event_symbol event_symbols[] = { #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) -static const char *hw_event_names[] = { +static const char *hw_event_names[PERF_COUNT_HW_MAX] = { "cycles", "instructions", "cache-references", @@ -68,10 +68,11 @@ static const char *hw_event_names[] = { "branches", "branch-misses", "bus-cycles", - "stalled-cycles", + "stalled-cycles-frontend", + "stalled-cycles-backend", }; -static const char *sw_event_names[] = { +static const char *sw_event_names[PERF_COUNT_SW_MAX] = { "cpu-clock", "task-clock", "page-faults", -- cgit v1.2.3 From 2b427e14b77dbf3e05f1bd0785f1d07ea5fe924e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 Apr 2011 14:16:18 +0200 Subject: perf stat: Adjust stall cycles warning percentages Adjust to color thresholds to better match the percentages seen in real workloads. Both are now a bit more sensitive. 
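[ Editor's note: for quick reference, the bands after this change - frontend values from the hunk below, backend keeping its 50%/75% upper bands with only the yellow threshold moving to 20% - summarized as data in a throwaway sketch, not perf code: ]

#include <stdio.h>

/* warning bands after this change (ratio = stalled cycles as a %% of all cycles) */
struct stall_band { const char *counter; double yellow, magenta, red; };

int main(void)
{
	static const struct stall_band bands[] = {
		{ "stalled-cycles-frontend", 10.0, 30.0, 50.0 },
		{ "stalled-cycles-backend",  20.0, 50.0, 75.0 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(bands) / sizeof(bands[0]); i++)
		printf("%-25s yellow > %4.1f%%  magenta > %4.1f%%  red > %4.1f%%\n",
		       bands[i].counter, bands[i].yellow, bands[i].magenta, bands[i].red);
	return 0;
}
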
Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n004io7hjpn1dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index e45449938b8..2492a0efa4d 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -492,11 +492,11 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us ratio = avg / total * 100.0; color = PERF_COLOR_NORMAL; - if (ratio > 75.0) + if (ratio > 50.0) color = PERF_COLOR_RED; - else if (ratio > 50.0) + else if (ratio > 30.0) color = PERF_COLOR_MAGENTA; - else if (ratio > 20.0) + else if (ratio > 10.0) color = PERF_COLOR_YELLOW; fprintf(stderr, " # "); @@ -519,7 +519,7 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use color = PERF_COLOR_RED; else if (ratio > 50.0) color = PERF_COLOR_MAGENTA; - else if (ratio > 25.0) + else if (ratio > 20.0) color = PERF_COLOR_YELLOW; fprintf(stderr, " # "); -- cgit v1.2.3 From fce3c786d3a49eff397583b4b62fa38df90db937 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 30 Apr 2011 09:03:15 +0200 Subject: perf stat: Leave more room for percentages Triple digit percentages do not fit otherwise. Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n005io7hjpn1dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 2492a0efa4d..9e596ab98d0 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -499,8 +499,8 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us else if (ratio > 10.0) color = PERF_COLOR_YELLOW; - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%5.2f%%", ratio); + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " frontend cycles idle "); } @@ -522,9 +522,9 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use else if (ratio > 20.0) color = PERF_COLOR_YELLOW; - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%5.2f%%", ratio); - fprintf(stderr, " backend cycles idle "); + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%6.2f%%", ratio); + fprintf(stderr, " backend cycles idle "); } static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) @@ -545,8 +545,8 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double else if (ratio > 5.0) color = PERF_COLOR_YELLOW; - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%5.2f%%", ratio); + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " of all branches "); } @@ -568,8 +568,8 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou else if (ratio > 5.0) color = PERF_COLOR_YELLOW; - fprintf(stderr, " # "); - color_fprintf(stderr, color, "%5.2f%%", ratio); + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%6.2f%%", ratio); fprintf(stderr, " of all L1-dcache hits "); } @@ -607,14 +607,14 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total) ratio = avg / total; - fprintf(stderr, " # %4.2f insns per cycle ", ratio); + fprintf(stderr, " # %5.2f insns per cycle ", 
ratio); total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]); total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu])); if (total && avg) { ratio = total / avg; - fprintf(stderr, "\n # %4.2f stalled cycles per insn", ratio); + fprintf(stderr, "\n # %5.2f stalled cycles per insn", ratio); } } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && -- cgit v1.2.3 From 370faf1dd0461ad811852c8abbbcd3d73b1e4fc4 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 Apr 2011 16:11:03 +0200 Subject: perf stat: Fail softly on unsupported events David Ahern reported this perf stat failure: > # /tmp/build-perf/perf stat -- sleep 1 > Error: stalled-cycles-frontend event is not supported. > Fatal: Not all events could be opened. > > This is a Dell R410 with an E5620 processor. Fail in a softer fashion on unknown/unsupported events. Reported-by: David Ahern Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n006io7hjpn1dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 9e596ab98d0..c8b535bc27b 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -377,7 +377,7 @@ static int run_perf_stat(int argc __used, const char **argv) list_for_each_entry(counter, &evsel_list->entries, node) { if (create_perf_stat_counter(counter) < 0) { - if (errno == EINVAL || errno == ENOSYS) + if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) continue; if (errno == EPERM || errno == EACCES) { @@ -385,8 +385,6 @@ static int run_perf_stat(int argc __used, const char **argv) "\t Consider tweaking" " /proc/sys/kernel/perf_event_paranoid or running as root.", system_wide ? "system-wide " : ""); - } else if (errno == ENOENT) { - error("%s event is not supported. ", event_name(counter)); } else { error("open_counter returned with %d (%s). " "/bin/dmesg may provide additional information.\n", -- cgit v1.2.3 From 301120396b766ae4480e52ece220516a1707822b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 30 Apr 2011 09:14:54 +0200 Subject: perf events, x86: Add Westmere stalled-cycles-frontend/backend events Extend the Intel Westmere PMU driver with definitions for generic front-end and back-end stall events. ( These are only approximations. 
) Reported-by: David Ahern Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n008io7hjpn1dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 7983b9a9533..be8363adf4e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1458,6 +1458,12 @@ static __init int intel_pmu_init(void) x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; x86_pmu.extra_regs = intel_westmere_extra_regs; + + /* UOPS_ISSUED.STALLED_CYCLES */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; + /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1; + pr_cont("Westmere events, "); break; -- cgit v1.2.3 From 947b4ad1d198b7303ecc961f4939a331be0c48f0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 Apr 2011 22:52:42 +0200 Subject: perf list: Fix max event string size Recent stalled-cycles event names were larger than the 40 chars printout used by perf list. Extend that, make it robust for future extensions and also adjust alignments in face of wider event names. Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/n/tip-7y40wib8n009io7hjpn1dsrm@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/util/parse-events.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 8a407f3e286..ffa493a2433 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -929,7 +929,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob) snprintf(evt_path, MAXPATHLEN, "%s:%s", sys_dirent.d_name, evt_dirent.d_name); - printf(" %-42s [%s]\n", evt_path, + printf(" %-50s [%s]\n", evt_path, event_type_descriptors[PERF_TYPE_TRACEPOINT]); } closedir(evt_dir); @@ -994,7 +994,7 @@ void print_events_type(u8 type) else snprintf(name, sizeof(name), "%s", syms->symbol); - printf(" %-42s [%s]\n", name, + printf(" %-50s [%s]\n", name, event_type_descriptors[type]); } } @@ -1012,11 +1012,10 @@ int print_hwcache_events(const char *event_glob) for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { char *name = event_cache_name(type, op, i); - if (event_glob != NULL && - !strglobmatch(name, event_glob)) + if (event_glob != NULL && !strglobmatch(name, event_glob)) continue; - printf(" %-42s [%s]\n", name, + printf(" %-50s [%s]\n", name, event_type_descriptors[PERF_TYPE_HW_CACHE]); ++printed; } @@ -1026,14 +1025,16 @@ int print_hwcache_events(const char *event_glob) return printed; } +#define MAX_NAME_LEN 100 + /* * Print the help text for the event symbols: */ void print_events(const char *event_glob) { - struct event_symbol *syms = event_symbols; unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0; - char name[40]; + struct event_symbol *syms = event_symbols; + char name[MAX_NAME_LEN]; printf("\n"); printf("List of pre-defined events (to be used in -e):\n"); @@ -1053,10 +1054,10 @@ void print_events(const char *event_glob) continue; if (strlen(syms->alias)) - sprintf(name, "%s OR %s", syms->symbol, syms->alias); + snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); else 
- strcpy(name, syms->symbol); - printf(" %-42s [%s]\n", name, + strncpy(name, syms->symbol, MAX_NAME_LEN); + printf(" %-50s [%s]\n", name, event_type_descriptors[type]); prev_type = type; @@ -1073,12 +1074,12 @@ void print_events(const char *event_glob) return; printf("\n"); - printf(" %-42s [%s]\n", + printf(" %-50s [%s]\n", "rNNN (see 'perf list --help' on how to encode it)", event_type_descriptors[PERF_TYPE_RAW]); printf("\n"); - printf(" %-42s [%s]\n", + printf(" %-50s [%s]\n", "mem:[:access]", event_type_descriptors[PERF_TYPE_BREAKPOINT]); printf("\n"); -- cgit v1.2.3 From c63ca0c01d73563d4e2ab174bb3dd1e5efb907e6 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 29 Apr 2011 16:04:15 -0600 Subject: perf stat: Tell user about unsupported events in the list Similar to perf-record, tell user about unsupported events that will not be counted if invoked in verbose mode. e.g., $ perf stat -e dTLB-prefetch-misses -v -- sleep 1 dTLB-prefetch-misses event is not supported by the kernel. dTLB-prefetch-misses: 0 0 0 Performance counter stats for 'sleep 1': dTLB-prefetch-misses 1.001884783 seconds time elapsed Signed-off-by: David Ahern Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1304114655-10600-1-git-send-email-dsahern@gmail.com Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c8b535bc27b..602c3c96fa1 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -377,8 +377,12 @@ static int run_perf_stat(int argc __used, const char **argv) list_for_each_entry(counter, &evsel_list->entries, node) { if (create_perf_stat_counter(counter) < 0) { - if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) + if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) { + if (verbose) + ui__warning("%s event is not supported by the kernel.\n", + event_name(counter)); continue; + } if (errno == EPERM || errno == EACCES) { error("You may not have permission to collect %sstats.\n" -- cgit v1.2.3 From 058e297d34a404caaa5ed277de15698d8dc43000 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 29 Apr 2011 22:35:33 -0400 Subject: ftrace: Only update the function code on write to filter files If function tracing is enabled, a read of the filter files will cause the call to stop_machine to update the function trace sites. It should only call stop_machine on write. 
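As a rough illustration of the idea (not the actual kernel code; the example_* names below are made up), the release handler only has to look at how the file was opened to decide whether the expensive, stop_machine based re-patching is warranted:

	#include <linux/fs.h>

	static void example_run_update(void);	/* stand-in for the real update path */

	static int example_filter_release(struct inode *inode, struct file *file)
	{
		/* ... per-open iterator state would be freed here ... */

		if (file->f_mode & FMODE_WRITE) {
			/*
			 * Only a writer can have changed the filter, so only
			 * then is the code-patching pass worth doing.
			 */
			example_run_update();
		}

		return 0;
	}

A read-only open of the filter files then never reaches the update path, which is exactly the behaviour the patch below restores.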
Cc: stable@kernel.org Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ee24fa1935a..666880d051e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2413,14 +2413,16 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) ftrace_match_records(parser->buffer, parser->idx, enable); } - mutex_lock(&ftrace_lock); - if (ftrace_start_up && ftrace_enabled) - ftrace_run_update_code(FTRACE_ENABLE_CALLS); - mutex_unlock(&ftrace_lock); - trace_parser_put(parser); kfree(iter); + if (file->f_mode & FMODE_WRITE) { + mutex_lock(&ftrace_lock); + if (ftrace_start_up && ftrace_enabled) + ftrace_run_update_code(FTRACE_ENABLE_CALLS); + mutex_unlock(&ftrace_lock); + } + mutex_unlock(&ftrace_regex_lock); return 0; } -- cgit v1.2.3 From 0778d9ad33898faab7bf6316108b471790376e35 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 29 Apr 2011 10:36:31 -0400 Subject: ftrace: Make FTRACE_WARN_ON() work in if condition Let FTRACE_WARN_ON() be used as a stand alone statement or inside a conditional: if (FTRACE_WARN_ON(x)) Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ee24fa1935a..4ff65599c97 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -39,16 +39,20 @@ #include "trace_stat.h" #define FTRACE_WARN_ON(cond) \ - do { \ - if (WARN_ON(cond)) \ + ({ \ + int ___r = cond; \ + if (WARN_ON(___r)) \ ftrace_kill(); \ - } while (0) + ___r; \ + }) #define FTRACE_WARN_ON_ONCE(cond) \ - do { \ - if (WARN_ON_ONCE(cond)) \ + ({ \ + int ___r = cond; \ + if (WARN_ON_ONCE(___r)) \ ftrace_kill(); \ - } while (0) + ___r; \ + }) /* hash bits for specific function selection */ #define FTRACE_HASH_BITS 7 -- cgit v1.2.3 From 8ab2b7efd3e2ccf2c2dda3206b8171ecdbd0af40 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 21 Apr 2011 22:41:35 -0400 Subject: ftrace: Remove unnecessary disabling of irqs The disabling of interrupts around ftrace_update_code() was used to protect against the evil ftrace daemon from years past. But that daemon has long been killed. It is safe to keep interrupts enabled while updating the initial mcount into nops. The ftrace_mutex is also held which keeps other users at bay. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 4ff65599c97..f199fb2e1d2 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2707,7 +2707,6 @@ static int ftrace_process_locs(struct module *mod, { unsigned long *p; unsigned long addr; - unsigned long flags; mutex_lock(&ftrace_lock); p = start; @@ -2724,10 +2723,7 @@ static int ftrace_process_locs(struct module *mod, ftrace_record_ip(addr); } - /* disable interrupts to prevent kstop machine */ - local_irq_save(flags); ftrace_update_code(mod); - local_irq_restore(flags); mutex_unlock(&ftrace_lock); return 0; -- cgit v1.2.3 From 3499e461147636bf55c41128d83b679ac6ab2d86 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 21 Apr 2011 22:59:12 -0400 Subject: ftrace: Remove failures file The failures file in the debugfs tracing directory would list the functions that failed to convert when the old dead ftrace daemon tried to update code but failed. Since this code is now dead along with the daemon the failures file is useless. Remove it. 
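Going back to the FTRACE_WARN_ON() rework a couple of patches above, the trick is the GCC statement expression: a ({ ... }) block evaluates to its last expression, so the macro keeps its side effect and still yields a value usable in an if condition, which a do { ... } while (0) wrapper cannot. A sketch with made-up names (EXAMPLE_WARN_ON and handle_failure are not the kernel's):

	#include <stdio.h>

	static void handle_failure(void)
	{
		fprintf(stderr, "warned, disabling further processing\n");
	}

	#define EXAMPLE_WARN_ON(cond)			\
		({					\
			int ___r = !!(cond);		\
			if (___r)			\
				handle_failure();	\
			___r;				\
		})

	int main(void)
	{
		int broken = 1;

		/* usable both as a standalone statement and inside an if */
		if (EXAMPLE_WARN_ON(broken))
			return 1;

		return 0;
	}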
Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 39 ++------------------------------------- 1 file changed, 2 insertions(+), 37 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f199fb2e1d2..97b30f81864 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1355,9 +1355,8 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) enum { FTRACE_ITER_FILTER = (1 << 0), FTRACE_ITER_NOTRACE = (1 << 1), - FTRACE_ITER_FAILURES = (1 << 2), - FTRACE_ITER_PRINTALL = (1 << 3), - FTRACE_ITER_HASH = (1 << 4), + FTRACE_ITER_PRINTALL = (1 << 2), + FTRACE_ITER_HASH = (1 << 3), }; #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ @@ -1487,12 +1486,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos) rec = &iter->pg->records[iter->idx++]; if ((rec->flags & FTRACE_FL_FREE) || - (!(iter->flags & FTRACE_ITER_FAILURES) && - (rec->flags & FTRACE_FL_FAILED)) || - - ((iter->flags & FTRACE_ITER_FAILURES) && - !(rec->flags & FTRACE_FL_FAILED)) || - ((iter->flags & FTRACE_ITER_FILTER) && !(rec->flags & FTRACE_FL_FILTER)) || @@ -1633,24 +1626,6 @@ ftrace_avail_open(struct inode *inode, struct file *file) return ret; } -static int -ftrace_failures_open(struct inode *inode, struct file *file) -{ - int ret; - struct seq_file *m; - struct ftrace_iterator *iter; - - ret = ftrace_avail_open(inode, file); - if (!ret) { - m = file->private_data; - iter = m->private; - iter->flags = FTRACE_ITER_FAILURES; - } - - return ret; -} - - static void ftrace_filter_reset(int enable) { struct ftrace_page *pg; @@ -2448,13 +2423,6 @@ static const struct file_operations ftrace_avail_fops = { .release = seq_release_private, }; -static const struct file_operations ftrace_failures_fops = { - .open = ftrace_failures_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release_private, -}; - static const struct file_operations ftrace_filter_fops = { .open = ftrace_filter_open, .read = seq_read, @@ -2683,9 +2651,6 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) trace_create_file("available_filter_functions", 0444, d_tracer, NULL, &ftrace_avail_fops); - trace_create_file("failures", 0444, - d_tracer, NULL, &ftrace_failures_fops); - trace_create_file("set_ftrace_filter", 0644, d_tracer, NULL, &ftrace_filter_fops); -- cgit v1.2.3 From 45a4a2372b364107cabea79f255b333236626416 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 21 Apr 2011 23:16:46 -0400 Subject: ftrace: Remove FTRACE_FL_FAILED flag Since we disable all function tracer processing if we detect that a modification of a instruction had failed, we do not need to track that the record has failed. No more ftrace processing is allowed, and the FTRACE_FL_FAILED flag is pointless. Removing this flag simplifies some of the code, but some ftrace_disabled checks needed to be added or move around a little. 
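A condensed sketch of that simplification (example_* names are hypothetical, not the kernel's): once a single patching failure shuts down the whole facility, there is nothing left to record per entry, so the loop only needs a global "disabled" check up front and a bail-out on the first failure.

	struct example_rec {
		unsigned long	ip;
		unsigned long	flags;
	};

	static int example_disabled;

	static int example_patch_one(struct example_rec *rec);	/* arch-specific stand-in */

	static void example_patch_all(struct example_rec *recs, int nr)
	{
		int i;

		if (example_disabled)	/* checked once up front, not per record */
			return;

		for (i = 0; i < nr; i++) {
			if (example_patch_one(&recs[i])) {
				example_disabled = 1;	/* game over: stop all patching */
				return;
			}
		}
	}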
Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 9 +++--- kernel/trace/ftrace.c | 76 +++++++++++++++++++++++++++++++------------------- 2 files changed, 51 insertions(+), 34 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ca29e03c1fa..2a195ffd426 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -147,11 +147,10 @@ extern int ftrace_text_reserved(void *start, void *end); enum { FTRACE_FL_FREE = (1 << 0), - FTRACE_FL_FAILED = (1 << 1), - FTRACE_FL_FILTER = (1 << 2), - FTRACE_FL_ENABLED = (1 << 3), - FTRACE_FL_NOTRACE = (1 << 4), - FTRACE_FL_CONVERTED = (1 << 5), + FTRACE_FL_FILTER = (1 << 1), + FTRACE_FL_ENABLED = (1 << 2), + FTRACE_FL_NOTRACE = (1 << 3), + FTRACE_FL_CONVERTED = (1 << 4), }; struct dyn_ftrace { diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 97b30f81864..eb19fae2c54 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1083,19 +1083,20 @@ static void ftrace_replace_code(int enable) struct ftrace_page *pg; int failed; + if (unlikely(ftrace_disabled)) + return; + do_for_each_ftrace_rec(pg, rec) { /* * Skip over free records, records that have * failed and not converted. */ if (rec->flags & FTRACE_FL_FREE || - rec->flags & FTRACE_FL_FAILED || !(rec->flags & FTRACE_FL_CONVERTED)) continue; failed = __ftrace_replace_code(rec, enable); if (failed) { - rec->flags |= FTRACE_FL_FAILED; ftrace_bug(failed, rec->ip); /* Stop processing */ return; @@ -1111,10 +1112,12 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) ip = rec->ip; + if (unlikely(ftrace_disabled)) + return 0; + ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); if (ret) { ftrace_bug(ret, ip); - rec->flags |= FTRACE_FL_FAILED; return 0; } return 1; @@ -1466,6 +1469,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos) struct ftrace_iterator *iter = m->private; struct dyn_ftrace *rec = NULL; + if (unlikely(ftrace_disabled)) + return NULL; + if (iter->flags & FTRACE_ITER_HASH) return t_hash_next(m, pos); @@ -1518,6 +1524,10 @@ static void *t_start(struct seq_file *m, loff_t *pos) loff_t l; mutex_lock(&ftrace_lock); + + if (unlikely(ftrace_disabled)) + return NULL; + /* * If an lseek was done, then reset and start from beginning. 
*/ @@ -1636,8 +1646,6 @@ static void ftrace_filter_reset(int enable) if (enable) ftrace_filtered = 0; do_for_each_ftrace_rec(pg, rec) { - if (rec->flags & FTRACE_FL_FAILED) - continue; rec->flags &= ~type; } while_for_each_ftrace_rec(); mutex_unlock(&ftrace_lock); @@ -1767,9 +1775,6 @@ static int ftrace_match_records(char *buff, int len, int enable) mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { - if (rec->flags & FTRACE_FL_FAILED) - continue; - if (ftrace_match_record(rec, search, search_len, type)) { if (not) rec->flags &= ~flag; @@ -1837,10 +1842,11 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) } mutex_lock(&ftrace_lock); - do_for_each_ftrace_rec(pg, rec) { - if (rec->flags & FTRACE_FL_FAILED) - continue; + if (unlikely(ftrace_disabled)) + goto out_unlock; + + do_for_each_ftrace_rec(pg, rec) { if (ftrace_match_module_record(rec, mod, search, search_len, type)) { @@ -1854,6 +1860,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) ftrace_filtered = 1; } while_for_each_ftrace_rec(); + out_unlock: mutex_unlock(&ftrace_lock); return found; @@ -2008,10 +2015,11 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, return -EINVAL; mutex_lock(&ftrace_lock); - do_for_each_ftrace_rec(pg, rec) { - if (rec->flags & FTRACE_FL_FAILED) - continue; + if (unlikely(ftrace_disabled)) + goto out_unlock; + + do_for_each_ftrace_rec(pg, rec) { if (!ftrace_match_record(rec, search, len, type)) continue; @@ -2218,6 +2226,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, mutex_lock(&ftrace_regex_lock); + ret = -ENODEV; + if (unlikely(ftrace_disabled)) + goto out_unlock; + if (file->f_mode & FMODE_READ) { struct seq_file *m = file->private_data; iter = m->private; @@ -2545,9 +2557,6 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) bool exists; int i; - if (ftrace_disabled) - return -ENODEV; - /* decode regex */ type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) @@ -2556,9 +2565,15 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) search_len = strlen(search); mutex_lock(&ftrace_lock); + + if (unlikely(ftrace_disabled)) { + mutex_unlock(&ftrace_lock); + return -ENODEV; + } + do_for_each_ftrace_rec(pg, rec) { - if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) + if (rec->flags & FTRACE_FL_FREE) continue; if (ftrace_match_record(rec, search, search_len, type)) { @@ -2700,10 +2715,11 @@ void ftrace_release_mod(struct module *mod) struct dyn_ftrace *rec; struct ftrace_page *pg; + mutex_lock(&ftrace_lock); + if (ftrace_disabled) - return; + goto out_unlock; - mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (within_module_core(rec->ip, mod)) { /* @@ -2714,6 +2730,7 @@ void ftrace_release_mod(struct module *mod) ftrace_free_rec(rec); } } while_for_each_ftrace_rec(); + out_unlock: mutex_unlock(&ftrace_lock); } @@ -3108,16 +3125,17 @@ void ftrace_kill(void) */ int register_ftrace_function(struct ftrace_ops *ops) { - int ret; - - if (unlikely(ftrace_disabled)) - return -1; + int ret = -1; mutex_lock(&ftrace_lock); + if (unlikely(ftrace_disabled)) + goto out_unlock; + ret = __register_ftrace_function(ops); ftrace_startup(0); + out_unlock: mutex_unlock(&ftrace_lock); return ret; } @@ -3145,14 +3163,14 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - int ret; - - if (unlikely(ftrace_disabled)) - return -ENODEV; + int ret = -ENODEV; 
mutex_lock(&ftrace_lock); - ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (unlikely(ftrace_disabled)) + goto out; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) goto out; -- cgit v1.2.3 From d2c8c3eafbf715306ec891e7ca52d3d999acbe31 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 25 Apr 2011 14:32:42 -0400 Subject: ftrace: Remove FTRACE_FL_CONVERTED flag Since we disable all function tracer processing if we detect that a modification of a instruction had failed, we do not need to track that the record has failed. No more ftrace processing is allowed, and the FTRACE_FL_CONVERTED flag is pointless. The FTRACE_FL_CONVERTED flag was used to denote records that were successfully converted from mcount calls into nops. But if a single record fails, all of ftrace is disabled. Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 1 - kernel/trace/ftrace.c | 12 ++++-------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 2a195ffd426..32047449b30 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -150,7 +150,6 @@ enum { FTRACE_FL_FILTER = (1 << 1), FTRACE_FL_ENABLED = (1 << 2), FTRACE_FL_NOTRACE = (1 << 3), - FTRACE_FL_CONVERTED = (1 << 4), }; struct dyn_ftrace { diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index eb19fae2c54..9abaaf46f21 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1087,12 +1087,8 @@ static void ftrace_replace_code(int enable) return; do_for_each_ftrace_rec(pg, rec) { - /* - * Skip over free records, records that have - * failed and not converted. - */ - if (rec->flags & FTRACE_FL_FREE || - !(rec->flags & FTRACE_FL_CONVERTED)) + /* Skip over free records */ + if (rec->flags & FTRACE_FL_FREE) continue; failed = __ftrace_replace_code(rec, enable); @@ -1280,10 +1276,10 @@ static int ftrace_update_code(struct module *mod) */ if (!ftrace_code_disable(mod, p)) { ftrace_free_rec(p); - continue; + /* Game over */ + break; } - p->flags |= FTRACE_FL_CONVERTED; ftrace_update_cnt++; /* -- cgit v1.2.3 From 996e87be7f537fb34638875dd37083166c733425 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 26 Apr 2011 16:11:03 -0400 Subject: ftrace: Move record update for normal and modules into a separate function The updating of a function record is moved to a single function. This will allow us to add specific changes in one location for both modules and kernel functions. Later patches will determine if the function record itself needs to be updated (which enables the mcount caller), or just the ftrace_ops needs the update. 
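For readability, here is the helper added by the hunk below, quoted in isolation: both the plain and the module-aware match loops now call it instead of open-coding the set/clear of the FILTER/NOTRACE bit in each loop body.

	static void
	update_record(struct dyn_ftrace *rec, unsigned long flag, int not)
	{
		if (not)
			rec->flags &= ~flag;	/* clear the requested bit */
		else
			rec->flags |= flag;	/* set the requested bit */
	}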
Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 9abaaf46f21..5b758ea344c 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1743,6 +1743,15 @@ static int ftrace_match(char *str, char *regex, int len, int type) return matched; } +static void +update_record(struct dyn_ftrace *rec, unsigned long flag, int not) +{ + if (not) + rec->flags &= ~flag; + else + rec->flags |= flag; +} + static int ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) { @@ -1772,10 +1781,7 @@ static int ftrace_match_records(char *buff, int len, int enable) do_for_each_ftrace_rec(pg, rec) { if (ftrace_match_record(rec, search, search_len, type)) { - if (not) - rec->flags &= ~flag; - else - rec->flags |= flag; + update_record(rec, flag, not); found = 1; } /* @@ -1846,10 +1852,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) if (ftrace_match_module_record(rec, mod, search, search_len, type)) { - if (not) - rec->flags &= ~flag; - else - rec->flags |= flag; + update_record(rec, flag, not); found = 1; } if (enable && (rec->flags & FTRACE_FL_FILTER)) -- cgit v1.2.3 From 491d0dcfb9707e1f83eff93ca503eb7573162ef2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 27 Apr 2011 21:43:36 -0400 Subject: ftrace: Consolidate updating of ftrace_trace_function There are three locations that perform almost identical functions in order to update the ftrace_trace_function (the ftrace function variable that gets called by mcount). Consolidate these into a single function called update_ftrace_function(). Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 95 ++++++++++++++++++--------------------------------- 1 file changed, 34 insertions(+), 61 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 5b758ea344c..33bcc71ca09 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -151,6 +151,34 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) } #endif +static void update_ftrace_function(void) +{ + ftrace_func_t func; + + /* + * If there's only one function registered, then call that + * function directly. Otherwise, we need to iterate over the + * registered callers. + */ + if (ftrace_list == &ftrace_list_end || + ftrace_list->next == &ftrace_list_end) + func = ftrace_list->func; + else + func = ftrace_list_func; + + /* If we filter on pids, update to use the pid function */ + if (!list_empty(&ftrace_pids)) { + set_ftrace_pid_function(func); + func = ftrace_pid_func; + } +#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST + ftrace_trace_function = func; +#else + __ftrace_trace_function = func; + ftrace_trace_function = ftrace_test_stop_func; +#endif +} + static int __register_ftrace_function(struct ftrace_ops *ops) { ops->next = ftrace_list; @@ -162,30 +190,8 @@ static int __register_ftrace_function(struct ftrace_ops *ops) */ rcu_assign_pointer(ftrace_list, ops); - if (ftrace_enabled) { - ftrace_func_t func; - - if (ops->next == &ftrace_list_end) - func = ops->func; - else - func = ftrace_list_func; - - if (!list_empty(&ftrace_pids)) { - set_ftrace_pid_function(func); - func = ftrace_pid_func; - } - - /* - * For one func, simply call it directly. - * For more than one func, call the chain. 
- */ -#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST - ftrace_trace_function = func; -#else - __ftrace_trace_function = func; - ftrace_trace_function = ftrace_test_stop_func; -#endif - } + if (ftrace_enabled) + update_ftrace_function(); return 0; } @@ -213,52 +219,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) *p = (*p)->next; - if (ftrace_enabled) { - /* If we only have one func left, then call that directly */ - if (ftrace_list->next == &ftrace_list_end) { - ftrace_func_t func = ftrace_list->func; - - if (!list_empty(&ftrace_pids)) { - set_ftrace_pid_function(func); - func = ftrace_pid_func; - } -#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST - ftrace_trace_function = func; -#else - __ftrace_trace_function = func; -#endif - } - } + if (ftrace_enabled) + update_ftrace_function(); return 0; } static void ftrace_update_pid_func(void) { - ftrace_func_t func; - + /* Only do something if we are tracing something */ if (ftrace_trace_function == ftrace_stub) return; -#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST - func = ftrace_trace_function; -#else - func = __ftrace_trace_function; -#endif - - if (!list_empty(&ftrace_pids)) { - set_ftrace_pid_function(func); - func = ftrace_pid_func; - } else { - if (func == ftrace_pid_func) - func = ftrace_pid_function; - } - -#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST - ftrace_trace_function = func; -#else - __ftrace_trace_function = func; -#endif + update_ftrace_function(); } #ifdef CONFIG_FUNCTION_PROFILER -- cgit v1.2.3 From b9df92d2a94eef8811061aecb1396290df440e2e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 28 Apr 2011 20:32:08 -0400 Subject: ftrace: Consolidate the function match routines for normal and mods The code used for matching functions is almost identical between normal selecting of functions and using the :mod: feature of set_ftrace_notrace. Consolidate the two users into one function. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 98 +++++++++++++++++++-------------------------------- 1 file changed, 36 insertions(+), 62 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 33bcc71ca09..4f19dbba12f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1726,34 +1726,52 @@ update_record(struct dyn_ftrace *rec, unsigned long flag, int not) } static int -ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) +ftrace_match_record(struct dyn_ftrace *rec, char *mod, + char *regex, int len, int type) { char str[KSYM_SYMBOL_LEN]; + char *modname; + + kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); + + if (mod) { + /* module lookup requires matching the module */ + if (!modname || strcmp(modname, mod)) + return 0; + + /* blank search means to match all funcs in the mod */ + if (!len) + return 1; + } - kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); return ftrace_match(str, regex, len, type); } -static int ftrace_match_records(char *buff, int len, int enable) +static int match_records(char *buff, int len, char *mod, int enable, int not) { - unsigned int search_len; + unsigned search_len = 0; struct ftrace_page *pg; struct dyn_ftrace *rec; + int type = MATCH_FULL; + char *search = buff; unsigned long flag; - char *search; - int type; - int not; int found = 0; - flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; - type = filter_parse_regex(buff, len, &search, ¬); + if (len) { + type = filter_parse_regex(buff, len, &search, ¬); + search_len = strlen(search); + } - search_len = strlen(search); + flag = enable ? 
FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; mutex_lock(&ftrace_lock); + + if (unlikely(ftrace_disabled)) + goto out_unlock; + do_for_each_ftrace_rec(pg, rec) { - if (ftrace_match_record(rec, search, search_len, type)) { + if (ftrace_match_record(rec, mod, search, search_len, type)) { update_record(rec, flag, not); found = 1; } @@ -1763,43 +1781,23 @@ static int ftrace_match_records(char *buff, int len, int enable) */ if (enable && (rec->flags & FTRACE_FL_FILTER)) ftrace_filtered = 1; + } while_for_each_ftrace_rec(); + out_unlock: mutex_unlock(&ftrace_lock); return found; } static int -ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, - char *regex, int len, int type) +ftrace_match_records(char *buff, int len, int enable) { - char str[KSYM_SYMBOL_LEN]; - char *modname; - - kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); - - if (!modname || strcmp(modname, mod)) - return 0; - - /* blank search means to match all funcs in the mod */ - if (len) - return ftrace_match(str, regex, len, type); - else - return 1; + return match_records(buff, len, NULL, enable, 0); } static int ftrace_match_module_records(char *buff, char *mod, int enable) { - unsigned search_len = 0; - struct ftrace_page *pg; - struct dyn_ftrace *rec; - int type = MATCH_FULL; - char *search = buff; - unsigned long flag; int not = 0; - int found = 0; - - flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; /* blank or '*' mean the same */ if (strcmp(buff, "*") == 0) @@ -1811,31 +1809,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) not = 1; } - if (strlen(buff)) { - type = filter_parse_regex(buff, strlen(buff), &search, ¬); - search_len = strlen(search); - } - - mutex_lock(&ftrace_lock); - - if (unlikely(ftrace_disabled)) - goto out_unlock; - - do_for_each_ftrace_rec(pg, rec) { - - if (ftrace_match_module_record(rec, mod, - search, search_len, type)) { - update_record(rec, flag, not); - found = 1; - } - if (enable && (rec->flags & FTRACE_FL_FILTER)) - ftrace_filtered = 1; - - } while_for_each_ftrace_rec(); - out_unlock: - mutex_unlock(&ftrace_lock); - - return found; + return match_records(buff, strlen(buff), mod, enable, not); } /* @@ -1993,7 +1967,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, do_for_each_ftrace_rec(pg, rec) { - if (!ftrace_match_record(rec, search, len, type)) + if (!ftrace_match_record(rec, NULL, search, len, type)) continue; entry = kmalloc(sizeof(*entry), GFP_KERNEL); @@ -2548,7 +2522,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) if (rec->flags & FTRACE_FL_FREE) continue; - if (ftrace_match_record(rec, search, search_len, type)) { + if (ftrace_match_record(rec, NULL, search, search_len, type)) { /* if it is in the array */ exists = false; for (i = 0; i < *idx; i++) { -- cgit v1.2.3 From fae85b7c8bcc7de9c0a2698587e20c15beb7d5a6 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 26 Oct 2010 20:24:03 +0200 Subject: perf: Start the restructuring mv kernel/perf_event.c -> kernel/events/core.c. From there, all further sensible splitting can happen. The idea is that due to perf_event.c becoming pretty sizable and with the advent of the marriage with ftrace, splitting functionality into its logical parts should help speeding up the unification and to manage the complexity of the subsystem. 
Signed-off-by: Borislav Petkov --- kernel/Makefile | 5 +- kernel/events/Makefile | 5 + kernel/events/core.c | 7455 ++++++++++++++++++++++++++++++++++++++++++++++++ kernel/perf_event.c | 7455 ------------------------------------------------ 4 files changed, 7463 insertions(+), 7457 deletions(-) create mode 100644 kernel/events/Makefile create mode 100644 kernel/events/core.c delete mode 100644 kernel/perf_event.c diff --git a/kernel/Makefile b/kernel/Makefile index 85cbfb31e73..79815306474 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -21,7 +21,6 @@ CFLAGS_REMOVE_mutex-debug.o = -pg CFLAGS_REMOVE_rtmutex-debug.o = -pg CFLAGS_REMOVE_cgroup-debug.o = -pg CFLAGS_REMOVE_sched_clock.o = -pg -CFLAGS_REMOVE_perf_event.o = -pg CFLAGS_REMOVE_irq_work.o = -pg endif @@ -103,7 +102,9 @@ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_TRACEPOINTS) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_IRQ_WORK) += irq_work.o -obj-$(CONFIG_PERF_EVENTS) += perf_event.o + +obj-$(CONFIG_PERF_EVENTS) += events/ + obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o obj-$(CONFIG_PADATA) += padata.o diff --git a/kernel/events/Makefile b/kernel/events/Makefile new file mode 100644 index 00000000000..26c00e4570e --- /dev/null +++ b/kernel/events/Makefile @@ -0,0 +1,5 @@ +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_core.o = -pg +endif + +obj-y += core.o diff --git a/kernel/events/core.c b/kernel/events/core.c new file mode 100644 index 00000000000..440bc485bbf --- /dev/null +++ b/kernel/events/core.c @@ -0,0 +1,7455 @@ +/* + * Performance events core code: + * + * Copyright (C) 2008 Thomas Gleixner + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright © 2009 Paul Mackerras, IBM Corp. + * + * For licensing details see kernel-base/COPYING + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct remote_function_call { + struct task_struct *p; + int (*func)(void *info); + void *info; + int ret; +}; + +static void remote_function(void *data) +{ + struct remote_function_call *tfc = data; + struct task_struct *p = tfc->p; + + if (p) { + tfc->ret = -EAGAIN; + if (task_cpu(p) != smp_processor_id() || !task_curr(p)) + return; + } + + tfc->ret = tfc->func(tfc->info); +} + +/** + * task_function_call - call a function on the cpu on which a task runs + * @p: the task to evaluate + * @func: the function to be called + * @info: the function call argument + * + * Calls the function @func when the task is currently running. This might + * be on the current CPU, which just calls the function directly + * + * returns: @func return value, or + * -ESRCH - when the process isn't running + * -EAGAIN - when the process moved away + */ +static int +task_function_call(struct task_struct *p, int (*func) (void *info), void *info) +{ + struct remote_function_call data = { + .p = p, + .func = func, + .info = info, + .ret = -ESRCH, /* No such (running) process */ + }; + + if (task_curr(p)) + smp_call_function_single(task_cpu(p), remote_function, &data, 1); + + return data.ret; +} + +/** + * cpu_function_call - call a function on the cpu + * @func: the function to be called + * @info: the function call argument + * + * Calls the function @func on the remote cpu. 
+ * + * returns: @func return value or -ENXIO when the cpu is offline + */ +static int cpu_function_call(int cpu, int (*func) (void *info), void *info) +{ + struct remote_function_call data = { + .p = NULL, + .func = func, + .info = info, + .ret = -ENXIO, /* No such CPU */ + }; + + smp_call_function_single(cpu, remote_function, &data, 1); + + return data.ret; +} + +#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ + PERF_FLAG_FD_OUTPUT |\ + PERF_FLAG_PID_CGROUP) + +enum event_type_t { + EVENT_FLEXIBLE = 0x1, + EVENT_PINNED = 0x2, + EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, +}; + +/* + * perf_sched_events : >0 events exist + * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu + */ +struct jump_label_key perf_sched_events __read_mostly; +static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); + +static atomic_t nr_mmap_events __read_mostly; +static atomic_t nr_comm_events __read_mostly; +static atomic_t nr_task_events __read_mostly; + +static LIST_HEAD(pmus); +static DEFINE_MUTEX(pmus_lock); +static struct srcu_struct pmus_srcu; + +/* + * perf event paranoia level: + * -1 - not paranoid at all + * 0 - disallow raw tracepoint access for unpriv + * 1 - disallow cpu events for unpriv + * 2 - disallow kernel profiling for unpriv + */ +int sysctl_perf_event_paranoid __read_mostly = 1; + +/* Minimum for 512 kiB + 1 user control page */ +int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ + +/* + * max perf event sample rate + */ +#define DEFAULT_MAX_SAMPLE_RATE 100000 +int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; +static int max_samples_per_tick __read_mostly = + DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); + +int perf_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (ret || !write) + return ret; + + max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); + + return 0; +} + +static atomic64_t perf_event_id; + +static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, + enum event_type_t event_type); + +static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, + enum event_type_t event_type, + struct task_struct *task); + +static void update_context_time(struct perf_event_context *ctx); +static u64 perf_event_time(struct perf_event *event); + +void __weak perf_event_print_debug(void) { } + +extern __weak const char *perf_pmu_name(void) +{ + return "pmu"; +} + +static inline u64 perf_clock(void) +{ + return local_clock(); +} + +static inline struct perf_cpu_context * +__get_cpu_context(struct perf_event_context *ctx) +{ + return this_cpu_ptr(ctx->pmu->pmu_cpu_context); +} + +#ifdef CONFIG_CGROUP_PERF + +/* + * Must ensure cgroup is pinned (css_get) before calling + * this function. In other words, we cannot call this function + * if there is no cgroup event for the current CPU context. 
+ */ +static inline struct perf_cgroup * +perf_cgroup_from_task(struct task_struct *task) +{ + return container_of(task_subsys_state(task, perf_subsys_id), + struct perf_cgroup, css); +} + +static inline bool +perf_cgroup_match(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + + return !event->cgrp || event->cgrp == cpuctx->cgrp; +} + +static inline void perf_get_cgroup(struct perf_event *event) +{ + css_get(&event->cgrp->css); +} + +static inline void perf_put_cgroup(struct perf_event *event) +{ + css_put(&event->cgrp->css); +} + +static inline void perf_detach_cgroup(struct perf_event *event) +{ + perf_put_cgroup(event); + event->cgrp = NULL; +} + +static inline int is_cgroup_event(struct perf_event *event) +{ + return event->cgrp != NULL; +} + +static inline u64 perf_cgroup_event_time(struct perf_event *event) +{ + struct perf_cgroup_info *t; + + t = per_cpu_ptr(event->cgrp->info, event->cpu); + return t->time; +} + +static inline void __update_cgrp_time(struct perf_cgroup *cgrp) +{ + struct perf_cgroup_info *info; + u64 now; + + now = perf_clock(); + + info = this_cpu_ptr(cgrp->info); + + info->time += now - info->timestamp; + info->timestamp = now; +} + +static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) +{ + struct perf_cgroup *cgrp_out = cpuctx->cgrp; + if (cgrp_out) + __update_cgrp_time(cgrp_out); +} + +static inline void update_cgrp_time_from_event(struct perf_event *event) +{ + struct perf_cgroup *cgrp; + + /* + * ensure we access cgroup data only when needed and + * when we know the cgroup is pinned (css_get) + */ + if (!is_cgroup_event(event)) + return; + + cgrp = perf_cgroup_from_task(current); + /* + * Do not update time when cgroup is not active + */ + if (cgrp == event->cgrp) + __update_cgrp_time(event->cgrp); +} + +static inline void +perf_cgroup_set_timestamp(struct task_struct *task, + struct perf_event_context *ctx) +{ + struct perf_cgroup *cgrp; + struct perf_cgroup_info *info; + + /* + * ctx->lock held by caller + * ensure we do not access cgroup data + * unless we have the cgroup pinned (css_get) + */ + if (!task || !ctx->nr_cgroups) + return; + + cgrp = perf_cgroup_from_task(task); + info = this_cpu_ptr(cgrp->info); + info->timestamp = ctx->timestamp; +} + +#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ +#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */ + +/* + * reschedule events based on the cgroup constraint of task. + * + * mode SWOUT : schedule out everything + * mode SWIN : schedule in based on cgroup for next + */ +void perf_cgroup_switch(struct task_struct *task, int mode) +{ + struct perf_cpu_context *cpuctx; + struct pmu *pmu; + unsigned long flags; + + /* + * disable interrupts to avoid geting nr_cgroup + * changes via __perf_event_disable(). Also + * avoids preemption. + */ + local_irq_save(flags); + + /* + * we reschedule only in the presence of cgroup + * constrained events. + */ + rcu_read_lock(); + + list_for_each_entry_rcu(pmu, &pmus, entry) { + + cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + + perf_pmu_disable(cpuctx->ctx.pmu); + + /* + * perf_cgroup_events says at least one + * context on this CPU has cgroup events. + * + * ctx->nr_cgroups reports the number of cgroup + * events for a context. 
+ */ + if (cpuctx->ctx.nr_cgroups > 0) { + + if (mode & PERF_CGROUP_SWOUT) { + cpu_ctx_sched_out(cpuctx, EVENT_ALL); + /* + * must not be done before ctxswout due + * to event_filter_match() in event_sched_out() + */ + cpuctx->cgrp = NULL; + } + + if (mode & PERF_CGROUP_SWIN) { + WARN_ON_ONCE(cpuctx->cgrp); + /* set cgrp before ctxsw in to + * allow event_filter_match() to not + * have to pass task around + */ + cpuctx->cgrp = perf_cgroup_from_task(task); + cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); + } + } + + perf_pmu_enable(cpuctx->ctx.pmu); + } + + rcu_read_unlock(); + + local_irq_restore(flags); +} + +static inline void perf_cgroup_sched_out(struct task_struct *task) +{ + perf_cgroup_switch(task, PERF_CGROUP_SWOUT); +} + +static inline void perf_cgroup_sched_in(struct task_struct *task) +{ + perf_cgroup_switch(task, PERF_CGROUP_SWIN); +} + +static inline int perf_cgroup_connect(int fd, struct perf_event *event, + struct perf_event_attr *attr, + struct perf_event *group_leader) +{ + struct perf_cgroup *cgrp; + struct cgroup_subsys_state *css; + struct file *file; + int ret = 0, fput_needed; + + file = fget_light(fd, &fput_needed); + if (!file) + return -EBADF; + + css = cgroup_css_from_dir(file, perf_subsys_id); + if (IS_ERR(css)) { + ret = PTR_ERR(css); + goto out; + } + + cgrp = container_of(css, struct perf_cgroup, css); + event->cgrp = cgrp; + + /* must be done before we fput() the file */ + perf_get_cgroup(event); + + /* + * all events in a group must monitor + * the same cgroup because a task belongs + * to only one perf cgroup at a time + */ + if (group_leader && group_leader->cgrp != cgrp) { + perf_detach_cgroup(event); + ret = -EINVAL; + } +out: + fput_light(file, fput_needed); + return ret; +} + +static inline void +perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) +{ + struct perf_cgroup_info *t; + t = per_cpu_ptr(event->cgrp->info, event->cpu); + event->shadow_ctx_time = now - t->timestamp; +} + +static inline void +perf_cgroup_defer_enabled(struct perf_event *event) +{ + /* + * when the current task's perf cgroup does not match + * the event's, we need to remember to call the + * perf_mark_enable() function the first time a task with + * a matching perf cgroup is scheduled in. 
+ */ + if (is_cgroup_event(event) && !perf_cgroup_match(event)) + event->cgrp_defer_enabled = 1; +} + +static inline void +perf_cgroup_mark_enabled(struct perf_event *event, + struct perf_event_context *ctx) +{ + struct perf_event *sub; + u64 tstamp = perf_event_time(event); + + if (!event->cgrp_defer_enabled) + return; + + event->cgrp_defer_enabled = 0; + + event->tstamp_enabled = tstamp - event->total_time_enabled; + list_for_each_entry(sub, &event->sibling_list, group_entry) { + if (sub->state >= PERF_EVENT_STATE_INACTIVE) { + sub->tstamp_enabled = tstamp - sub->total_time_enabled; + sub->cgrp_defer_enabled = 0; + } + } +} +#else /* !CONFIG_CGROUP_PERF */ + +static inline bool +perf_cgroup_match(struct perf_event *event) +{ + return true; +} + +static inline void perf_detach_cgroup(struct perf_event *event) +{} + +static inline int is_cgroup_event(struct perf_event *event) +{ + return 0; +} + +static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) +{ + return 0; +} + +static inline void update_cgrp_time_from_event(struct perf_event *event) +{ +} + +static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) +{ +} + +static inline void perf_cgroup_sched_out(struct task_struct *task) +{ +} + +static inline void perf_cgroup_sched_in(struct task_struct *task) +{ +} + +static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, + struct perf_event_attr *attr, + struct perf_event *group_leader) +{ + return -EINVAL; +} + +static inline void +perf_cgroup_set_timestamp(struct task_struct *task, + struct perf_event_context *ctx) +{ +} + +void +perf_cgroup_switch(struct task_struct *task, struct task_struct *next) +{ +} + +static inline void +perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) +{ +} + +static inline u64 perf_cgroup_event_time(struct perf_event *event) +{ + return 0; +} + +static inline void +perf_cgroup_defer_enabled(struct perf_event *event) +{ +} + +static inline void +perf_cgroup_mark_enabled(struct perf_event *event, + struct perf_event_context *ctx) +{ +} +#endif + +void perf_pmu_disable(struct pmu *pmu) +{ + int *count = this_cpu_ptr(pmu->pmu_disable_count); + if (!(*count)++) + pmu->pmu_disable(pmu); +} + +void perf_pmu_enable(struct pmu *pmu) +{ + int *count = this_cpu_ptr(pmu->pmu_disable_count); + if (!--(*count)) + pmu->pmu_enable(pmu); +} + +static DEFINE_PER_CPU(struct list_head, rotation_list); + +/* + * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized + * because they're strictly cpu affine and rotate_start is called with IRQs + * disabled, while rotate_context is called from IRQ context. 
+ */ +static void perf_pmu_rotate_start(struct pmu *pmu) +{ + struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + struct list_head *head = &__get_cpu_var(rotation_list); + + WARN_ON(!irqs_disabled()); + + if (list_empty(&cpuctx->rotation_list)) + list_add(&cpuctx->rotation_list, head); +} + +static void get_ctx(struct perf_event_context *ctx) +{ + WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); +} + +static void free_ctx(struct rcu_head *head) +{ + struct perf_event_context *ctx; + + ctx = container_of(head, struct perf_event_context, rcu_head); + kfree(ctx); +} + +static void put_ctx(struct perf_event_context *ctx) +{ + if (atomic_dec_and_test(&ctx->refcount)) { + if (ctx->parent_ctx) + put_ctx(ctx->parent_ctx); + if (ctx->task) + put_task_struct(ctx->task); + call_rcu(&ctx->rcu_head, free_ctx); + } +} + +static void unclone_ctx(struct perf_event_context *ctx) +{ + if (ctx->parent_ctx) { + put_ctx(ctx->parent_ctx); + ctx->parent_ctx = NULL; + } +} + +static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) +{ + /* + * only top level events have the pid namespace they were created in + */ + if (event->parent) + event = event->parent; + + return task_tgid_nr_ns(p, event->ns); +} + +static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) +{ + /* + * only top level events have the pid namespace they were created in + */ + if (event->parent) + event = event->parent; + + return task_pid_nr_ns(p, event->ns); +} + +/* + * If we inherit events we want to return the parent event id + * to userspace. + */ +static u64 primary_event_id(struct perf_event *event) +{ + u64 id = event->id; + + if (event->parent) + id = event->parent->id; + + return id; +} + +/* + * Get the perf_event_context for a task and lock it. + * This has to cope with with the fact that until it is locked, + * the context could get moved to another task. + */ +static struct perf_event_context * +perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) +{ + struct perf_event_context *ctx; + + rcu_read_lock(); +retry: + ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); + if (ctx) { + /* + * If this context is a clone of another, it might + * get swapped for another underneath us by + * perf_event_task_sched_out, though the + * rcu_read_lock() protects us from any context + * getting freed. Lock the context and check if it + * got swapped before we could get the lock, and retry + * if so. If we locked the right context, then it + * can't get swapped on us any more. + */ + raw_spin_lock_irqsave(&ctx->lock, *flags); + if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { + raw_spin_unlock_irqrestore(&ctx->lock, *flags); + goto retry; + } + + if (!atomic_inc_not_zero(&ctx->refcount)) { + raw_spin_unlock_irqrestore(&ctx->lock, *flags); + ctx = NULL; + } + } + rcu_read_unlock(); + return ctx; +} + +/* + * Get the context for a task and increment its pin_count so it + * can't get swapped to another task. This also increments its + * reference count so that the context can't get freed. 
+ */ +static struct perf_event_context * +perf_pin_task_context(struct task_struct *task, int ctxn) +{ + struct perf_event_context *ctx; + unsigned long flags; + + ctx = perf_lock_task_context(task, ctxn, &flags); + if (ctx) { + ++ctx->pin_count; + raw_spin_unlock_irqrestore(&ctx->lock, flags); + } + return ctx; +} + +static void perf_unpin_context(struct perf_event_context *ctx) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&ctx->lock, flags); + --ctx->pin_count; + raw_spin_unlock_irqrestore(&ctx->lock, flags); +} + +/* + * Update the record of the current time in a context. + */ +static void update_context_time(struct perf_event_context *ctx) +{ + u64 now = perf_clock(); + + ctx->time += now - ctx->timestamp; + ctx->timestamp = now; +} + +static u64 perf_event_time(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + + if (is_cgroup_event(event)) + return perf_cgroup_event_time(event); + + return ctx ? ctx->time : 0; +} + +/* + * Update the total_time_enabled and total_time_running fields for a event. + */ +static void update_event_times(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + u64 run_end; + + if (event->state < PERF_EVENT_STATE_INACTIVE || + event->group_leader->state < PERF_EVENT_STATE_INACTIVE) + return; + /* + * in cgroup mode, time_enabled represents + * the time the event was enabled AND active + * tasks were in the monitored cgroup. This is + * independent of the activity of the context as + * there may be a mix of cgroup and non-cgroup events. + * + * That is why we treat cgroup events differently + * here. + */ + if (is_cgroup_event(event)) + run_end = perf_event_time(event); + else if (ctx->is_active) + run_end = ctx->time; + else + run_end = event->tstamp_stopped; + + event->total_time_enabled = run_end - event->tstamp_enabled; + + if (event->state == PERF_EVENT_STATE_INACTIVE) + run_end = event->tstamp_stopped; + else + run_end = perf_event_time(event); + + event->total_time_running = run_end - event->tstamp_running; + +} + +/* + * Update total_time_enabled and total_time_running for all events in a group. + */ +static void update_group_times(struct perf_event *leader) +{ + struct perf_event *event; + + update_event_times(leader); + list_for_each_entry(event, &leader->sibling_list, group_entry) + update_event_times(event); +} + +static struct list_head * +ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) +{ + if (event->attr.pinned) + return &ctx->pinned_groups; + else + return &ctx->flexible_groups; +} + +/* + * Add a event from the lists for its context. + * Must be called with ctx->mutex and ctx->lock held. + */ +static void +list_add_event(struct perf_event *event, struct perf_event_context *ctx) +{ + WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); + event->attach_state |= PERF_ATTACH_CONTEXT; + + /* + * If we're a stand alone event or group leader, we go to the context + * list, group events are kept attached to the group so that + * perf_group_detach can, at all times, locate all siblings. 
+ */ + if (event->group_leader == event) { + struct list_head *list; + + if (is_software_event(event)) + event->group_flags |= PERF_GROUP_SOFTWARE; + + list = ctx_group_list(event, ctx); + list_add_tail(&event->group_entry, list); + } + + if (is_cgroup_event(event)) + ctx->nr_cgroups++; + + list_add_rcu(&event->event_entry, &ctx->event_list); + if (!ctx->nr_events) + perf_pmu_rotate_start(ctx->pmu); + ctx->nr_events++; + if (event->attr.inherit_stat) + ctx->nr_stat++; +} + +/* + * Called at perf_event creation and when events are attached/detached from a + * group. + */ +static void perf_event__read_size(struct perf_event *event) +{ + int entry = sizeof(u64); /* value */ + int size = 0; + int nr = 1; + + if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + size += sizeof(u64); + + if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + size += sizeof(u64); + + if (event->attr.read_format & PERF_FORMAT_ID) + entry += sizeof(u64); + + if (event->attr.read_format & PERF_FORMAT_GROUP) { + nr += event->group_leader->nr_siblings; + size += sizeof(u64); + } + + size += entry * nr; + event->read_size = size; +} + +static void perf_event__header_size(struct perf_event *event) +{ + struct perf_sample_data *data; + u64 sample_type = event->attr.sample_type; + u16 size = 0; + + perf_event__read_size(event); + + if (sample_type & PERF_SAMPLE_IP) + size += sizeof(data->ip); + + if (sample_type & PERF_SAMPLE_ADDR) + size += sizeof(data->addr); + + if (sample_type & PERF_SAMPLE_PERIOD) + size += sizeof(data->period); + + if (sample_type & PERF_SAMPLE_READ) + size += event->read_size; + + event->header_size = size; +} + +static void perf_event__id_header_size(struct perf_event *event) +{ + struct perf_sample_data *data; + u64 sample_type = event->attr.sample_type; + u16 size = 0; + + if (sample_type & PERF_SAMPLE_TID) + size += sizeof(data->tid_entry); + + if (sample_type & PERF_SAMPLE_TIME) + size += sizeof(data->time); + + if (sample_type & PERF_SAMPLE_ID) + size += sizeof(data->id); + + if (sample_type & PERF_SAMPLE_STREAM_ID) + size += sizeof(data->stream_id); + + if (sample_type & PERF_SAMPLE_CPU) + size += sizeof(data->cpu_entry); + + event->id_header_size = size; +} + +static void perf_group_attach(struct perf_event *event) +{ + struct perf_event *group_leader = event->group_leader, *pos; + + /* + * We can have double attach due to group movement in perf_event_open. + */ + if (event->attach_state & PERF_ATTACH_GROUP) + return; + + event->attach_state |= PERF_ATTACH_GROUP; + + if (group_leader == event) + return; + + if (group_leader->group_flags & PERF_GROUP_SOFTWARE && + !is_software_event(event)) + group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; + + list_add_tail(&event->group_entry, &group_leader->sibling_list); + group_leader->nr_siblings++; + + perf_event__header_size(group_leader); + + list_for_each_entry(pos, &group_leader->sibling_list, group_entry) + perf_event__header_size(pos); +} + +/* + * Remove a event from the lists for its context. + * Must be called with ctx->mutex and ctx->lock held. + */ +static void +list_del_event(struct perf_event *event, struct perf_event_context *ctx) +{ + struct perf_cpu_context *cpuctx; + /* + * We can have double detach due to exit/hot-unplug + close. 
+ */ + if (!(event->attach_state & PERF_ATTACH_CONTEXT)) + return; + + event->attach_state &= ~PERF_ATTACH_CONTEXT; + + if (is_cgroup_event(event)) { + ctx->nr_cgroups--; + cpuctx = __get_cpu_context(ctx); + /* + * if there are no more cgroup events + * then cler cgrp to avoid stale pointer + * in update_cgrp_time_from_cpuctx() + */ + if (!ctx->nr_cgroups) + cpuctx->cgrp = NULL; + } + + ctx->nr_events--; + if (event->attr.inherit_stat) + ctx->nr_stat--; + + list_del_rcu(&event->event_entry); + + if (event->group_leader == event) + list_del_init(&event->group_entry); + + update_group_times(event); + + /* + * If event was in error state, then keep it + * that way, otherwise bogus counts will be + * returned on read(). The only way to get out + * of error state is by explicit re-enabling + * of the event + */ + if (event->state > PERF_EVENT_STATE_OFF) + event->state = PERF_EVENT_STATE_OFF; +} + +static void perf_group_detach(struct perf_event *event) +{ + struct perf_event *sibling, *tmp; + struct list_head *list = NULL; + + /* + * We can have double detach due to exit/hot-unplug + close. + */ + if (!(event->attach_state & PERF_ATTACH_GROUP)) + return; + + event->attach_state &= ~PERF_ATTACH_GROUP; + + /* + * If this is a sibling, remove it from its group. + */ + if (event->group_leader != event) { + list_del_init(&event->group_entry); + event->group_leader->nr_siblings--; + goto out; + } + + if (!list_empty(&event->group_entry)) + list = &event->group_entry; + + /* + * If this was a group event with sibling events then + * upgrade the siblings to singleton events by adding them + * to whatever list we are on. + */ + list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { + if (list) + list_move_tail(&sibling->group_entry, list); + sibling->group_leader = sibling; + + /* Inherit group flags from the previous leader */ + sibling->group_flags = event->group_flags; + } + +out: + perf_event__header_size(event->group_leader); + + list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) + perf_event__header_size(tmp); +} + +static inline int +event_filter_match(struct perf_event *event) +{ + return (event->cpu == -1 || event->cpu == smp_processor_id()) + && perf_cgroup_match(event); +} + +static void +event_sched_out(struct perf_event *event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ + u64 tstamp = perf_event_time(event); + u64 delta; + /* + * An event which could not be activated because of + * filter mismatch still needs to have its timings + * maintained, otherwise bogus information is return + * via read() for time_enabled, time_running: + */ + if (event->state == PERF_EVENT_STATE_INACTIVE + && !event_filter_match(event)) { + delta = tstamp - event->tstamp_stopped; + event->tstamp_running += delta; + event->tstamp_stopped = tstamp; + } + + if (event->state != PERF_EVENT_STATE_ACTIVE) + return; + + event->state = PERF_EVENT_STATE_INACTIVE; + if (event->pending_disable) { + event->pending_disable = 0; + event->state = PERF_EVENT_STATE_OFF; + } + event->tstamp_stopped = tstamp; + event->pmu->del(event, 0); + event->oncpu = -1; + + if (!is_software_event(event)) + cpuctx->active_oncpu--; + ctx->nr_active--; + if (event->attr.exclusive || !cpuctx->active_oncpu) + cpuctx->exclusive = 0; +} + +static void +group_sched_out(struct perf_event *group_event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ + struct perf_event *event; + int state = group_event->state; + + event_sched_out(group_event, cpuctx, ctx); + + 
/* + * Schedule out siblings (if any): + */ + list_for_each_entry(event, &group_event->sibling_list, group_entry) + event_sched_out(event, cpuctx, ctx); + + if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) + cpuctx->exclusive = 0; +} + +/* + * Cross CPU call to remove a performance event + * + * We disable the event on the hardware level first. After that we + * remove it from the context list. + */ +static int __perf_remove_from_context(void *info) +{ + struct perf_event *event = info; + struct perf_event_context *ctx = event->ctx; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + + raw_spin_lock(&ctx->lock); + event_sched_out(event, cpuctx, ctx); + list_del_event(event, ctx); + raw_spin_unlock(&ctx->lock); + + return 0; +} + + +/* + * Remove the event from a task's (or a CPU's) list of events. + * + * CPU events are removed with a smp call. For task events we only + * call when the task is on a CPU. + * + * If event->ctx is a cloned context, callers must make sure that + * every task struct that event->ctx->task could possibly point to + * remains valid. This is OK when called from perf_release since + * that only calls us on the top-level context, which can't be a clone. + * When called from perf_event_exit_task, it's OK because the + * context has been detached from its task. + */ +static void perf_remove_from_context(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + struct task_struct *task = ctx->task; + + lockdep_assert_held(&ctx->mutex); + + if (!task) { + /* + * Per cpu events are removed via an smp call and + * the removal is always successful. + */ + cpu_function_call(event->cpu, __perf_remove_from_context, event); + return; + } + +retry: + if (!task_function_call(task, __perf_remove_from_context, event)) + return; + + raw_spin_lock_irq(&ctx->lock); + /* + * If we failed to find a running task, but find the context active now + * that we've acquired the ctx->lock, retry. + */ + if (ctx->is_active) { + raw_spin_unlock_irq(&ctx->lock); + goto retry; + } + + /* + * Since the task isn't running, its safe to remove the event, us + * holding the ctx->lock ensures the task won't get scheduled in. + */ + list_del_event(event, ctx); + raw_spin_unlock_irq(&ctx->lock); +} + +/* + * Cross CPU call to disable a performance event + */ +static int __perf_event_disable(void *info) +{ + struct perf_event *event = info; + struct perf_event_context *ctx = event->ctx; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + + /* + * If this is a per-task event, need to check whether this + * event's task is the current task on this cpu. + * + * Can trigger due to concurrent perf_event_context_sched_out() + * flipping contexts around. + */ + if (ctx->task && cpuctx->task_ctx != ctx) + return -EINVAL; + + raw_spin_lock(&ctx->lock); + + /* + * If the event is on, turn it off. + * If it is in error state, leave it in error state. + */ + if (event->state >= PERF_EVENT_STATE_INACTIVE) { + update_context_time(ctx); + update_cgrp_time_from_event(event); + update_group_times(event); + if (event == event->group_leader) + group_sched_out(event, cpuctx, ctx); + else + event_sched_out(event, cpuctx, ctx); + event->state = PERF_EVENT_STATE_OFF; + } + + raw_spin_unlock(&ctx->lock); + + return 0; +} + +/* + * Disable a event. + * + * If event->ctx is a cloned context, callers must make sure that + * every task struct that event->ctx->task could possibly point to + * remains valid. 
This condition is satisifed when called through + * perf_event_for_each_child or perf_event_for_each because they + * hold the top-level event's child_mutex, so any descendant that + * goes to exit will block in sync_child_event. + * When called from perf_pending_event it's OK because event->ctx + * is the current context on this CPU and preemption is disabled, + * hence we can't get into perf_event_task_sched_out for this context. + */ +void perf_event_disable(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + struct task_struct *task = ctx->task; + + if (!task) { + /* + * Disable the event on the cpu that it's on + */ + cpu_function_call(event->cpu, __perf_event_disable, event); + return; + } + +retry: + if (!task_function_call(task, __perf_event_disable, event)) + return; + + raw_spin_lock_irq(&ctx->lock); + /* + * If the event is still active, we need to retry the cross-call. + */ + if (event->state == PERF_EVENT_STATE_ACTIVE) { + raw_spin_unlock_irq(&ctx->lock); + /* + * Reload the task pointer, it might have been changed by + * a concurrent perf_event_context_sched_out(). + */ + task = ctx->task; + goto retry; + } + + /* + * Since we have the lock this context can't be scheduled + * in, so we can change the state safely. + */ + if (event->state == PERF_EVENT_STATE_INACTIVE) { + update_group_times(event); + event->state = PERF_EVENT_STATE_OFF; + } + raw_spin_unlock_irq(&ctx->lock); +} + +static void perf_set_shadow_time(struct perf_event *event, + struct perf_event_context *ctx, + u64 tstamp) +{ + /* + * use the correct time source for the time snapshot + * + * We could get by without this by leveraging the + * fact that to get to this function, the caller + * has most likely already called update_context_time() + * and update_cgrp_time_xx() and thus both timestamp + * are identical (or very close). Given that tstamp is, + * already adjusted for cgroup, we could say that: + * tstamp - ctx->timestamp + * is equivalent to + * tstamp - cgrp->timestamp. + * + * Then, in perf_output_read(), the calculation would + * work with no changes because: + * - event is guaranteed scheduled in + * - no scheduled out in between + * - thus the timestamp would be the same + * + * But this is a bit hairy. + * + * So instead, we have an explicit cgroup call to remain + * within the time time source all along. We believe it + * is cleaner and simpler to understand. + */ + if (is_cgroup_event(event)) + perf_cgroup_set_shadow_time(event, tstamp); + else + event->shadow_ctx_time = tstamp - ctx->timestamp; +} + +#define MAX_INTERRUPTS (~0ULL) + +static void perf_log_throttle(struct perf_event *event, int enable); + +static int +event_sched_in(struct perf_event *event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ + u64 tstamp = perf_event_time(event); + + if (event->state <= PERF_EVENT_STATE_OFF) + return 0; + + event->state = PERF_EVENT_STATE_ACTIVE; + event->oncpu = smp_processor_id(); + + /* + * Unthrottle events, since we scheduled we might have missed several + * ticks already, also for a heavily scheduling task there is little + * guarantee it'll get a tick in a timely manner. 
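The tstamp_* bookkeeping in event_sched_out()/event_sched_in() above is what ultimately feeds time_enabled and time_running back to user space. A minimal sketch, not part of this patch, that observes those fields through the read() format; the perf_event_open() wrapper is a local helper, not kernel code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t buf[3];	/* value, time_enabled, time_running */
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* self, any cpu */
	if (fd < 0)
		return 1;

	usleep(10 * 1000);				/* accumulate some runtime */

	if (read(fd, buf, sizeof(buf)) != sizeof(buf))
		return 1;
	printf("count=%llu enabled=%llu running=%llu\n",
	       (unsigned long long)buf[0], (unsigned long long)buf[1],
	       (unsigned long long)buf[2]);
	return 0;
}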
+ */ + if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { + perf_log_throttle(event, 1); + event->hw.interrupts = 0; + } + + /* + * The new state must be visible before we turn it on in the hardware: + */ + smp_wmb(); + + if (event->pmu->add(event, PERF_EF_START)) { + event->state = PERF_EVENT_STATE_INACTIVE; + event->oncpu = -1; + return -EAGAIN; + } + + event->tstamp_running += tstamp - event->tstamp_stopped; + + perf_set_shadow_time(event, ctx, tstamp); + + if (!is_software_event(event)) + cpuctx->active_oncpu++; + ctx->nr_active++; + + if (event->attr.exclusive) + cpuctx->exclusive = 1; + + return 0; +} + +static int +group_sched_in(struct perf_event *group_event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ + struct perf_event *event, *partial_group = NULL; + struct pmu *pmu = group_event->pmu; + u64 now = ctx->time; + bool simulate = false; + + if (group_event->state == PERF_EVENT_STATE_OFF) + return 0; + + pmu->start_txn(pmu); + + if (event_sched_in(group_event, cpuctx, ctx)) { + pmu->cancel_txn(pmu); + return -EAGAIN; + } + + /* + * Schedule in siblings as one group (if any): + */ + list_for_each_entry(event, &group_event->sibling_list, group_entry) { + if (event_sched_in(event, cpuctx, ctx)) { + partial_group = event; + goto group_error; + } + } + + if (!pmu->commit_txn(pmu)) + return 0; + +group_error: + /* + * Groups can be scheduled in as one unit only, so undo any + * partial group before returning: + * The events up to the failed event are scheduled out normally, + * tstamp_stopped will be updated. + * + * The failed events and the remaining siblings need to have + * their timings updated as if they had gone thru event_sched_in() + * and event_sched_out(). This is required to get consistent timings + * across the group. This also takes care of the case where the group + * could never be scheduled by ensuring tstamp_stopped is set to mark + * the time the event was actually stopped, such that time delta + * calculation in update_event_times() is correct. + */ + list_for_each_entry(event, &group_event->sibling_list, group_entry) { + if (event == partial_group) + simulate = true; + + if (simulate) { + event->tstamp_running += now - event->tstamp_stopped; + event->tstamp_stopped = now; + } else { + event_sched_out(event, cpuctx, ctx); + } + } + event_sched_out(group_event, cpuctx, ctx); + + pmu->cancel_txn(pmu); + + return -EAGAIN; +} + +/* + * Work out whether we can put this event group on the CPU now. + */ +static int group_can_go_on(struct perf_event *event, + struct perf_cpu_context *cpuctx, + int can_add_hw) +{ + /* + * Groups consisting entirely of software events can always go on. + */ + if (event->group_flags & PERF_GROUP_SOFTWARE) + return 1; + /* + * If an exclusive group is already on, no other hardware + * events can go on. + */ + if (cpuctx->exclusive) + return 0; + /* + * If this group is exclusive and there are already + * events on the CPU, it can't go on. + */ + if (event->attr.exclusive && cpuctx->active_oncpu) + return 0; + /* + * Otherwise, try to add it if all previous groups were able + * to go on. 
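group_sched_in() above relies on the pmu->start_txn()/commit_txn()/cancel_txn() callbacks so that a group either goes onto the PMU completely or not at all. The following stand-alone sketch illustrates that all-or-nothing pattern against a made-up fixed-capacity counter array; every name in it is illustrative, nothing here is kernel API:

#include <stdio.h>

#define NR_SLOTS 4

static int used;	/* counters currently scheduled on the fake PMU */
static int txn_start;	/* snapshot taken at "start_txn" time */

static void start_txn(void)  { txn_start = used; }
static void cancel_txn(void) { used = txn_start; }	/* roll back partial adds */
static int  commit_txn(void) { return 0; }		/* always succeeds here */

static int event_add(void)
{
	if (used == NR_SLOTS)
		return -1;
	used++;
	return 0;
}

/* Schedule a leader plus its siblings as one unit, like group_sched_in(). */
static int group_add(int nr_members)
{
	int i;

	start_txn();
	for (i = 0; i < nr_members; i++) {
		if (event_add()) {
			cancel_txn();		/* undo the partial group */
			return -1;
		}
	}
	return commit_txn();
}

int main(void)
{
	printf("group of 3: %s\n", group_add(3) ? "rejected" : "scheduled");
	printf("group of 3: %s\n", group_add(3) ? "rejected" : "scheduled");
	printf("slots used: %d (no partial group left behind)\n", used);
	return 0;
}

The second group is rejected and the slot count stays at three, which is the property the tstamp fix-up loop in group_sched_in() preserves on the timing side.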
+ */ + return can_add_hw; +} + +static void add_event_to_ctx(struct perf_event *event, + struct perf_event_context *ctx) +{ + u64 tstamp = perf_event_time(event); + + list_add_event(event, ctx); + perf_group_attach(event); + event->tstamp_enabled = tstamp; + event->tstamp_running = tstamp; + event->tstamp_stopped = tstamp; +} + +static void perf_event_context_sched_in(struct perf_event_context *ctx, + struct task_struct *tsk); + +/* + * Cross CPU call to install and enable a performance event + * + * Must be called with ctx->mutex held + */ +static int __perf_install_in_context(void *info) +{ + struct perf_event *event = info; + struct perf_event_context *ctx = event->ctx; + struct perf_event *leader = event->group_leader; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + int err; + + /* + * In case we're installing a new context to an already running task, + * could also happen before perf_event_task_sched_in() on architectures + * which do context switches with IRQs enabled. + */ + if (ctx->task && !cpuctx->task_ctx) + perf_event_context_sched_in(ctx, ctx->task); + + raw_spin_lock(&ctx->lock); + ctx->is_active = 1; + update_context_time(ctx); + /* + * update cgrp time only if current cgrp + * matches event->cgrp. Must be done before + * calling add_event_to_ctx() + */ + update_cgrp_time_from_event(event); + + add_event_to_ctx(event, ctx); + + if (!event_filter_match(event)) + goto unlock; + + /* + * Don't put the event on if it is disabled or if + * it is in a group and the group isn't on. + */ + if (event->state != PERF_EVENT_STATE_INACTIVE || + (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)) + goto unlock; + + /* + * An exclusive event can't go on if there are already active + * hardware events, and no hardware event can go on if there + * is already an exclusive event on. + */ + if (!group_can_go_on(event, cpuctx, 1)) + err = -EEXIST; + else + err = event_sched_in(event, cpuctx, ctx); + + if (err) { + /* + * This event couldn't go on. If it is in a group + * then we have to pull the whole group off. + * If the event group is pinned then put it in error state. + */ + if (leader != event) + group_sched_out(leader, cpuctx, ctx); + if (leader->attr.pinned) { + update_group_times(leader); + leader->state = PERF_EVENT_STATE_ERROR; + } + } + +unlock: + raw_spin_unlock(&ctx->lock); + + return 0; +} + +/* + * Attach a performance event to a context + * + * First we add the event to the list with the hardware enable bit + * in event->hw_config cleared. + * + * If the event is attached to a task which is on a CPU we use a smp + * call to enable it in the task context. The task might have been + * scheduled away, but we check this in the smp call again. + */ +static void +perf_install_in_context(struct perf_event_context *ctx, + struct perf_event *event, + int cpu) +{ + struct task_struct *task = ctx->task; + + lockdep_assert_held(&ctx->mutex); + + event->ctx = ctx; + + if (!task) { + /* + * Per cpu events are installed via an smp call and + * the install is always successful. + */ + cpu_function_call(cpu, __perf_install_in_context, event); + return; + } + +retry: + if (!task_function_call(task, __perf_install_in_context, event)) + return; + + raw_spin_lock_irq(&ctx->lock); + /* + * If we failed to find a running task, but find the context active now + * that we've acquired the ctx->lock, retry. 
+ */ + if (ctx->is_active) { + raw_spin_unlock_irq(&ctx->lock); + goto retry; + } + + /* + * Since the task isn't running, its safe to add the event, us holding + * the ctx->lock ensures the task won't get scheduled in. + */ + add_event_to_ctx(event, ctx); + raw_spin_unlock_irq(&ctx->lock); +} + +/* + * Put a event into inactive state and update time fields. + * Enabling the leader of a group effectively enables all + * the group members that aren't explicitly disabled, so we + * have to update their ->tstamp_enabled also. + * Note: this works for group members as well as group leaders + * since the non-leader members' sibling_lists will be empty. + */ +static void __perf_event_mark_enabled(struct perf_event *event, + struct perf_event_context *ctx) +{ + struct perf_event *sub; + u64 tstamp = perf_event_time(event); + + event->state = PERF_EVENT_STATE_INACTIVE; + event->tstamp_enabled = tstamp - event->total_time_enabled; + list_for_each_entry(sub, &event->sibling_list, group_entry) { + if (sub->state >= PERF_EVENT_STATE_INACTIVE) + sub->tstamp_enabled = tstamp - sub->total_time_enabled; + } +} + +/* + * Cross CPU call to enable a performance event + */ +static int __perf_event_enable(void *info) +{ + struct perf_event *event = info; + struct perf_event_context *ctx = event->ctx; + struct perf_event *leader = event->group_leader; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + int err; + + if (WARN_ON_ONCE(!ctx->is_active)) + return -EINVAL; + + raw_spin_lock(&ctx->lock); + update_context_time(ctx); + + if (event->state >= PERF_EVENT_STATE_INACTIVE) + goto unlock; + + /* + * set current task's cgroup time reference point + */ + perf_cgroup_set_timestamp(current, ctx); + + __perf_event_mark_enabled(event, ctx); + + if (!event_filter_match(event)) { + if (is_cgroup_event(event)) + perf_cgroup_defer_enabled(event); + goto unlock; + } + + /* + * If the event is in a group and isn't the group leader, + * then don't put it on unless the group is on. + */ + if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) + goto unlock; + + if (!group_can_go_on(event, cpuctx, 1)) { + err = -EEXIST; + } else { + if (event == leader) + err = group_sched_in(event, cpuctx, ctx); + else + err = event_sched_in(event, cpuctx, ctx); + } + + if (err) { + /* + * If this event can't go on and it's part of a + * group, then the whole group has to come off. + */ + if (leader != event) + group_sched_out(leader, cpuctx, ctx); + if (leader->attr.pinned) { + update_group_times(leader); + leader->state = PERF_EVENT_STATE_ERROR; + } + } + +unlock: + raw_spin_unlock(&ctx->lock); + + return 0; +} + +/* + * Enable a event. + * + * If event->ctx is a cloned context, callers must make sure that + * every task struct that event->ctx->task could possibly point to + * remains valid. This condition is satisfied when called through + * perf_event_for_each_child or perf_event_for_each as described + * for perf_event_disable. + */ +void perf_event_enable(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + struct task_struct *task = ctx->task; + + if (!task) { + /* + * Enable the event on the cpu that it's on + */ + cpu_function_call(event->cpu, __perf_event_enable, event); + return; + } + + raw_spin_lock_irq(&ctx->lock); + if (event->state >= PERF_EVENT_STATE_INACTIVE) + goto out; + + /* + * If the event is in error state, clear that first. 
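__perf_install_in_context() and __perf_event_enable() above are normally reached from user space through the perf ioctls. A small illustrative fragment, assuming fd comes from perf_event_open() with attr.disabled = 1:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Illustrative only; fd is assumed to come from perf_event_open(). */
static void measure_region(int fd)
{
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* ends up in __perf_event_enable() */

	/* ... code under measurement ... */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);	/* ends up in __perf_event_disable() */
}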
+ * That way, if we see the event in error state below, we + * know that it has gone back into error state, as distinct + * from the task having been scheduled away before the + * cross-call arrived. + */ + if (event->state == PERF_EVENT_STATE_ERROR) + event->state = PERF_EVENT_STATE_OFF; + +retry: + if (!ctx->is_active) { + __perf_event_mark_enabled(event, ctx); + goto out; + } + + raw_spin_unlock_irq(&ctx->lock); + + if (!task_function_call(task, __perf_event_enable, event)) + return; + + raw_spin_lock_irq(&ctx->lock); + + /* + * If the context is active and the event is still off, + * we need to retry the cross-call. + */ + if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { + /* + * task could have been flipped by a concurrent + * perf_event_context_sched_out() + */ + task = ctx->task; + goto retry; + } + +out: + raw_spin_unlock_irq(&ctx->lock); +} + +static int perf_event_refresh(struct perf_event *event, int refresh) +{ + /* + * not supported on inherited events + */ + if (event->attr.inherit || !is_sampling_event(event)) + return -EINVAL; + + atomic_add(refresh, &event->event_limit); + perf_event_enable(event); + + return 0; +} + +static void ctx_sched_out(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx, + enum event_type_t event_type) +{ + struct perf_event *event; + + raw_spin_lock(&ctx->lock); + perf_pmu_disable(ctx->pmu); + ctx->is_active = 0; + if (likely(!ctx->nr_events)) + goto out; + update_context_time(ctx); + update_cgrp_time_from_cpuctx(cpuctx); + + if (!ctx->nr_active) + goto out; + + if (event_type & EVENT_PINNED) { + list_for_each_entry(event, &ctx->pinned_groups, group_entry) + group_sched_out(event, cpuctx, ctx); + } + + if (event_type & EVENT_FLEXIBLE) { + list_for_each_entry(event, &ctx->flexible_groups, group_entry) + group_sched_out(event, cpuctx, ctx); + } +out: + perf_pmu_enable(ctx->pmu); + raw_spin_unlock(&ctx->lock); +} + +/* + * Test whether two contexts are equivalent, i.e. whether they + * have both been cloned from the same version of the same context + * and they both have the same number of enabled events. + * If the number of enabled events is the same, then the set + * of enabled events should be the same, because these are both + * inherited contexts, therefore we can't access individual events + * in them directly with an fd; we can only enable/disable all + * events via prctl, or enable/disable all events in a family + * via ioctl, which will have the same effect on both contexts. + */ +static int context_equiv(struct perf_event_context *ctx1, + struct perf_event_context *ctx2) +{ + return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx + && ctx1->parent_gen == ctx2->parent_gen + && !ctx1->pin_count && !ctx2->pin_count; +} + +static void __perf_event_sync_stat(struct perf_event *event, + struct perf_event *next_event) +{ + u64 value; + + if (!event->attr.inherit_stat) + return; + + /* + * Update the event value, we cannot use perf_event_read() + * because we're in the middle of a context switch and have IRQs + * disabled, which upsets smp_call_function_single(), however + * we know the event must be on the current CPU, therefore we + * don't need to use it. + */ + switch (event->state) { + case PERF_EVENT_STATE_ACTIVE: + event->pmu->read(event); + /* fall-through */ + + case PERF_EVENT_STATE_INACTIVE: + update_event_times(event); + break; + + default: + break; + } + + /* + * In order to keep per-task stats reliable we need to flip the event + * values when we flip the contexts. 
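perf_event_refresh() above backs the PERF_EVENT_IOC_REFRESH ioctl handled further down: it bumps event_limit and re-enables the event, so a sampling event can be armed for a bounded number of further overflows (the overflow-side accounting is outside this hunk). A hedged user-space fragment, assuming fd is a sampling, non-inherited event from perf_event_open():

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Illustrative only; fd must be a sampling event (attr.sample_period != 0)
 * and not an inherited one, otherwise the kernel returns -EINVAL as above. */
static int arm_for_overflows(int fd, int nr)
{
	return ioctl(fd, PERF_EVENT_IOC_REFRESH, nr);
}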
+ */ + value = local64_read(&next_event->count); + value = local64_xchg(&event->count, value); + local64_set(&next_event->count, value); + + swap(event->total_time_enabled, next_event->total_time_enabled); + swap(event->total_time_running, next_event->total_time_running); + + /* + * Since we swizzled the values, update the user visible data too. + */ + perf_event_update_userpage(event); + perf_event_update_userpage(next_event); +} + +#define list_next_entry(pos, member) \ + list_entry(pos->member.next, typeof(*pos), member) + +static void perf_event_sync_stat(struct perf_event_context *ctx, + struct perf_event_context *next_ctx) +{ + struct perf_event *event, *next_event; + + if (!ctx->nr_stat) + return; + + update_context_time(ctx); + + event = list_first_entry(&ctx->event_list, + struct perf_event, event_entry); + + next_event = list_first_entry(&next_ctx->event_list, + struct perf_event, event_entry); + + while (&event->event_entry != &ctx->event_list && + &next_event->event_entry != &next_ctx->event_list) { + + __perf_event_sync_stat(event, next_event); + + event = list_next_entry(event, event_entry); + next_event = list_next_entry(next_event, event_entry); + } +} + +static void perf_event_context_sched_out(struct task_struct *task, int ctxn, + struct task_struct *next) +{ + struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; + struct perf_event_context *next_ctx; + struct perf_event_context *parent; + struct perf_cpu_context *cpuctx; + int do_switch = 1; + + if (likely(!ctx)) + return; + + cpuctx = __get_cpu_context(ctx); + if (!cpuctx->task_ctx) + return; + + rcu_read_lock(); + parent = rcu_dereference(ctx->parent_ctx); + next_ctx = next->perf_event_ctxp[ctxn]; + if (parent && next_ctx && + rcu_dereference(next_ctx->parent_ctx) == parent) { + /* + * Looks like the two contexts are clones, so we might be + * able to optimize the context switch. We lock both + * contexts and check that they are clones under the + * lock (including re-checking that neither has been + * uncloned in the meantime). It doesn't matter which + * order we take the locks because no other cpu could + * be trying to lock both of these tasks. + */ + raw_spin_lock(&ctx->lock); + raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); + if (context_equiv(ctx, next_ctx)) { + /* + * XXX do we need a memory barrier of sorts + * wrt to rcu_dereference() of perf_event_ctxp + */ + task->perf_event_ctxp[ctxn] = next_ctx; + next->perf_event_ctxp[ctxn] = ctx; + ctx->task = next; + next_ctx->task = task; + do_switch = 0; + + perf_event_sync_stat(ctx, next_ctx); + } + raw_spin_unlock(&next_ctx->lock); + raw_spin_unlock(&ctx->lock); + } + rcu_read_unlock(); + + if (do_switch) { + ctx_sched_out(ctx, cpuctx, EVENT_ALL); + cpuctx->task_ctx = NULL; + } +} + +#define for_each_task_context_nr(ctxn) \ + for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) + +/* + * Called from scheduler to remove the events of the current task, + * with interrupts disabled. + * + * We stop each event and update the event value in event->count. + * + * This does not protect us against NMI, but disable() + * sets the disabled bit in the control field of event _before_ + * accessing the event control register. If a NMI hits, then it will + * not restart the event. 
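The list_next_entry() helper defined above is the usual container_of()-style pointer arithmetic: given a list head embedded in a larger object, recover the next containing object. A stand-alone illustration with a hand-rolled two-element list (plain C, no kernel headers; all names here are local to the example):

#include <stddef.h>
#include <stdio.h>

struct node_head {		/* stripped-down stand-in for struct list_head */
	struct node_head *next;
};

struct counter {
	unsigned long long value;
	struct node_head entry;	/* embedded list linkage */
};

/* Same idea as list_entry()/list_next_entry(): subtract the member offset
 * to get from the embedded link back to the containing structure. */
#define node_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define node_next_entry(pos, member) \
	node_entry((pos)->member.next, typeof(*(pos)), member)

int main(void)
{
	struct counter a = { .value = 1 }, b = { .value = 2 };
	struct counter *cur = &a;

	a.entry.next = &b.entry;
	b.entry.next = &a.entry;	/* circular, like the kernel lists */

	cur = node_next_entry(cur, entry);
	printf("next value: %llu\n", cur->value);	/* prints 2 */
	return 0;
}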
+ */ +void __perf_event_task_sched_out(struct task_struct *task, + struct task_struct *next) +{ + int ctxn; + + for_each_task_context_nr(ctxn) + perf_event_context_sched_out(task, ctxn, next); + + /* + * if cgroup events exist on this CPU, then we need + * to check if we have to switch out PMU state. + * cgroup event are system-wide mode only + */ + if (atomic_read(&__get_cpu_var(perf_cgroup_events))) + perf_cgroup_sched_out(task); +} + +static void task_ctx_sched_out(struct perf_event_context *ctx, + enum event_type_t event_type) +{ + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + + if (!cpuctx->task_ctx) + return; + + if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) + return; + + ctx_sched_out(ctx, cpuctx, event_type); + cpuctx->task_ctx = NULL; +} + +/* + * Called with IRQs disabled + */ +static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, + enum event_type_t event_type) +{ + ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); +} + +static void +ctx_pinned_sched_in(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx) +{ + struct perf_event *event; + + list_for_each_entry(event, &ctx->pinned_groups, group_entry) { + if (event->state <= PERF_EVENT_STATE_OFF) + continue; + if (!event_filter_match(event)) + continue; + + /* may need to reset tstamp_enabled */ + if (is_cgroup_event(event)) + perf_cgroup_mark_enabled(event, ctx); + + if (group_can_go_on(event, cpuctx, 1)) + group_sched_in(event, cpuctx, ctx); + + /* + * If this pinned group hasn't been scheduled, + * put it in error state. + */ + if (event->state == PERF_EVENT_STATE_INACTIVE) { + update_group_times(event); + event->state = PERF_EVENT_STATE_ERROR; + } + } +} + +static void +ctx_flexible_sched_in(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx) +{ + struct perf_event *event; + int can_add_hw = 1; + + list_for_each_entry(event, &ctx->flexible_groups, group_entry) { + /* Ignore events in OFF or ERROR state */ + if (event->state <= PERF_EVENT_STATE_OFF) + continue; + /* + * Listen to the 'cpu' scheduling filter constraint + * of events: + */ + if (!event_filter_match(event)) + continue; + + /* may need to reset tstamp_enabled */ + if (is_cgroup_event(event)) + perf_cgroup_mark_enabled(event, ctx); + + if (group_can_go_on(event, cpuctx, can_add_hw)) { + if (group_sched_in(event, cpuctx, ctx)) + can_add_hw = 0; + } + } +} + +static void +ctx_sched_in(struct perf_event_context *ctx, + struct perf_cpu_context *cpuctx, + enum event_type_t event_type, + struct task_struct *task) +{ + u64 now; + + raw_spin_lock(&ctx->lock); + ctx->is_active = 1; + if (likely(!ctx->nr_events)) + goto out; + + now = perf_clock(); + ctx->timestamp = now; + perf_cgroup_set_timestamp(task, ctx); + /* + * First go through the list and put on any pinned groups + * in order to give them the best chance of going on. 
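ctx_pinned_sched_in() above is why a pinned group that cannot stay on the PMU ends up in PERF_EVENT_STATE_ERROR, which user space later sees as end-of-file on read() (see perf_read_hw() further down). A sketch of the user-visible side, with fd assumed to come from perf_event_open() and attr.read_format left at zero so the reply is a single u64:

#include <stdint.h>
#include <unistd.h>

/* Illustrative only: attr.pinned = 1 asks for the event to always be on the
 * PMU while its context is in; if that ever fails, the event goes to error
 * state and read() returns 0 bytes. */
static int read_pinned(int fd, uint64_t *value)
{
	ssize_t n = read(fd, value, sizeof(*value));

	if (n == 0)
		return -1;	/* pinned event fell off the PMU */
	return n == (ssize_t)sizeof(*value) ? 0 : -1;
}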
+ */ + if (event_type & EVENT_PINNED) + ctx_pinned_sched_in(ctx, cpuctx); + + /* Then walk through the lower prio flexible groups */ + if (event_type & EVENT_FLEXIBLE) + ctx_flexible_sched_in(ctx, cpuctx); + +out: + raw_spin_unlock(&ctx->lock); +} + +static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, + enum event_type_t event_type, + struct task_struct *task) +{ + struct perf_event_context *ctx = &cpuctx->ctx; + + ctx_sched_in(ctx, cpuctx, event_type, task); +} + +static void task_ctx_sched_in(struct perf_event_context *ctx, + enum event_type_t event_type) +{ + struct perf_cpu_context *cpuctx; + + cpuctx = __get_cpu_context(ctx); + if (cpuctx->task_ctx == ctx) + return; + + ctx_sched_in(ctx, cpuctx, event_type, NULL); + cpuctx->task_ctx = ctx; +} + +static void perf_event_context_sched_in(struct perf_event_context *ctx, + struct task_struct *task) +{ + struct perf_cpu_context *cpuctx; + + cpuctx = __get_cpu_context(ctx); + if (cpuctx->task_ctx == ctx) + return; + + perf_pmu_disable(ctx->pmu); + /* + * We want to keep the following priority order: + * cpu pinned (that don't need to move), task pinned, + * cpu flexible, task flexible. + */ + cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); + + ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); + cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); + ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); + + cpuctx->task_ctx = ctx; + + /* + * Since these rotations are per-cpu, we need to ensure the + * cpu-context we got scheduled on is actually rotating. + */ + perf_pmu_rotate_start(ctx->pmu); + perf_pmu_enable(ctx->pmu); +} + +/* + * Called from scheduler to add the events of the current task + * with interrupts disabled. + * + * We restore the event value and then enable it. + * + * This does not protect us against NMI, but enable() + * sets the enabled bit in the control field of event _before_ + * accessing the event control register. If a NMI hits, then it will + * keep the event running. + */ +void __perf_event_task_sched_in(struct task_struct *task) +{ + struct perf_event_context *ctx; + int ctxn; + + for_each_task_context_nr(ctxn) { + ctx = task->perf_event_ctxp[ctxn]; + if (likely(!ctx)) + continue; + + perf_event_context_sched_in(ctx, task); + } + /* + * if cgroup events exist on this CPU, then we need + * to check if we have to switch in PMU state. + * cgroup event are system-wide mode only + */ + if (atomic_read(&__get_cpu_var(perf_cgroup_events))) + perf_cgroup_sched_in(task); +} + +static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) +{ + u64 frequency = event->attr.sample_freq; + u64 sec = NSEC_PER_SEC; + u64 divisor, dividend; + + int count_fls, nsec_fls, frequency_fls, sec_fls; + + count_fls = fls64(count); + nsec_fls = fls64(nsec); + frequency_fls = fls64(frequency); + sec_fls = 30; + + /* + * We got @count in @nsec, with a target of sample_freq HZ + * the target period becomes: + * + * @count * 10^9 + * period = ------------------- + * @nsec * sample_freq + * + */ + + /* + * Reduce accuracy by one bit such that @a and @b converge + * to a similar magnitude. + */ +#define REDUCE_FLS(a, b) \ +do { \ + if (a##_fls > b##_fls) { \ + a >>= 1; \ + a##_fls--; \ + } else { \ + b >>= 1; \ + b##_fls--; \ + } \ +} while (0) + + /* + * Reduce accuracy until either term fits in a u64, then proceed with + * the other, so that finally we can do a u64/u64 division. 
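perf_calculate_period() above evaluates period = (count * 10^9) / (nsec * sample_freq) while keeping every intermediate within 64 bits via the REDUCE_FLS() trick. For comparison, the same target value computed the naive way with a 128-bit intermediate; this is a user-space sketch only, and it assumes GCC's __int128:

#include <stdint.h>
#include <stdio.h>

/* Naive reference for the formula above: the period that yields roughly
 * sample_freq samples per second at the observed event rate. */
static uint64_t calc_period(uint64_t count, uint64_t nsec, uint64_t freq)
{
	unsigned __int128 dividend = (unsigned __int128)count * 1000000000ull;
	unsigned __int128 divisor  = (unsigned __int128)nsec * freq;

	if (!divisor)
		return count;

	return (uint64_t)(dividend / divisor);
}

int main(void)
{
	/* 2e9 events observed in 10ms, target 1000 samples/sec -> 2e8 */
	printf("period = %llu\n",
	       (unsigned long long)calc_period(2000000000ull, 10000000ull, 1000));
	return 0;
}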
+ */ + while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { + REDUCE_FLS(nsec, frequency); + REDUCE_FLS(sec, count); + } + + if (count_fls + sec_fls > 64) { + divisor = nsec * frequency; + + while (count_fls + sec_fls > 64) { + REDUCE_FLS(count, sec); + divisor >>= 1; + } + + dividend = count * sec; + } else { + dividend = count * sec; + + while (nsec_fls + frequency_fls > 64) { + REDUCE_FLS(nsec, frequency); + dividend >>= 1; + } + + divisor = nsec * frequency; + } + + if (!divisor) + return dividend; + + return div64_u64(dividend, divisor); +} + +static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) +{ + struct hw_perf_event *hwc = &event->hw; + s64 period, sample_period; + s64 delta; + + period = perf_calculate_period(event, nsec, count); + + delta = (s64)(period - hwc->sample_period); + delta = (delta + 7) / 8; /* low pass filter */ + + sample_period = hwc->sample_period + delta; + + if (!sample_period) + sample_period = 1; + + hwc->sample_period = sample_period; + + if (local64_read(&hwc->period_left) > 8*sample_period) { + event->pmu->stop(event, PERF_EF_UPDATE); + local64_set(&hwc->period_left, 0); + event->pmu->start(event, PERF_EF_RELOAD); + } +} + +static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) +{ + struct perf_event *event; + struct hw_perf_event *hwc; + u64 interrupts, now; + s64 delta; + + raw_spin_lock(&ctx->lock); + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (event->state != PERF_EVENT_STATE_ACTIVE) + continue; + + if (!event_filter_match(event)) + continue; + + hwc = &event->hw; + + interrupts = hwc->interrupts; + hwc->interrupts = 0; + + /* + * unthrottle events on the tick + */ + if (interrupts == MAX_INTERRUPTS) { + perf_log_throttle(event, 1); + event->pmu->start(event, 0); + } + + if (!event->attr.freq || !event->attr.sample_freq) + continue; + + event->pmu->read(event); + now = local64_read(&event->count); + delta = now - hwc->freq_count_stamp; + hwc->freq_count_stamp = now; + + if (delta > 0) + perf_adjust_period(event, period, delta); + } + raw_spin_unlock(&ctx->lock); +} + +/* + * Round-robin a context's events: + */ +static void rotate_ctx(struct perf_event_context *ctx) +{ + raw_spin_lock(&ctx->lock); + + /* + * Rotate the first entry last of non-pinned groups. Rotation might be + * disabled by the inheritance code. + */ + if (!ctx->rotate_disable) + list_rotate_left(&ctx->flexible_groups); + + raw_spin_unlock(&ctx->lock); +} + +/* + * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized + * because they're strictly cpu affine and rotate_start is called with IRQs + * disabled, while rotate_context is called from IRQ context. 
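perf_adjust_period() above nudges sample_period toward the computed target with delta = (delta + 7) / 8, i.e. a one-eighth step per tick, so a single noisy interval cannot swing the period wildly. A stand-alone illustration of how that converges:

#include <stdio.h>

/* Same smoothing step as perf_adjust_period(): move 1/8 of the way toward
 * the target each tick, with the kernel's (delta + 7) / 8 rounding. */
static long long adjust(long long sample_period, long long target)
{
	long long delta = target - sample_period;

	delta = (delta + 7) / 8;		/* low pass filter */
	sample_period += delta;
	return sample_period ? sample_period : 1;
}

int main(void)
{
	long long period = 100000, target = 200000;
	int tick;

	for (tick = 1; tick <= 10; tick++) {
		period = adjust(period, target);
		printf("tick %2d: sample_period = %lld\n", tick, period);
	}
	return 0;
}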
+ */ +static void perf_rotate_context(struct perf_cpu_context *cpuctx) +{ + u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC; + struct perf_event_context *ctx = NULL; + int rotate = 0, remove = 1; + + if (cpuctx->ctx.nr_events) { + remove = 0; + if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) + rotate = 1; + } + + ctx = cpuctx->task_ctx; + if (ctx && ctx->nr_events) { + remove = 0; + if (ctx->nr_events != ctx->nr_active) + rotate = 1; + } + + perf_pmu_disable(cpuctx->ctx.pmu); + perf_ctx_adjust_freq(&cpuctx->ctx, interval); + if (ctx) + perf_ctx_adjust_freq(ctx, interval); + + if (!rotate) + goto done; + + cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); + if (ctx) + task_ctx_sched_out(ctx, EVENT_FLEXIBLE); + + rotate_ctx(&cpuctx->ctx); + if (ctx) + rotate_ctx(ctx); + + cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current); + if (ctx) + task_ctx_sched_in(ctx, EVENT_FLEXIBLE); + +done: + if (remove) + list_del_init(&cpuctx->rotation_list); + + perf_pmu_enable(cpuctx->ctx.pmu); +} + +void perf_event_task_tick(void) +{ + struct list_head *head = &__get_cpu_var(rotation_list); + struct perf_cpu_context *cpuctx, *tmp; + + WARN_ON(!irqs_disabled()); + + list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { + if (cpuctx->jiffies_interval == 1 || + !(jiffies % cpuctx->jiffies_interval)) + perf_rotate_context(cpuctx); + } +} + +static int event_enable_on_exec(struct perf_event *event, + struct perf_event_context *ctx) +{ + if (!event->attr.enable_on_exec) + return 0; + + event->attr.enable_on_exec = 0; + if (event->state >= PERF_EVENT_STATE_INACTIVE) + return 0; + + __perf_event_mark_enabled(event, ctx); + + return 1; +} + +/* + * Enable all of a task's events that have been marked enable-on-exec. + * This expects task == current. + */ +static void perf_event_enable_on_exec(struct perf_event_context *ctx) +{ + struct perf_event *event; + unsigned long flags; + int enabled = 0; + int ret; + + local_irq_save(flags); + if (!ctx || !ctx->nr_events) + goto out; + + /* + * We must ctxsw out cgroup events to avoid conflict + * when invoking perf_task_event_sched_in() later on + * in this function. Otherwise we end up trying to + * ctxswin cgroup events which are already scheduled + * in. + */ + perf_cgroup_sched_out(current); + task_ctx_sched_out(ctx, EVENT_ALL); + + raw_spin_lock(&ctx->lock); + + list_for_each_entry(event, &ctx->pinned_groups, group_entry) { + ret = event_enable_on_exec(event, ctx); + if (ret) + enabled = 1; + } + + list_for_each_entry(event, &ctx->flexible_groups, group_entry) { + ret = event_enable_on_exec(event, ctx); + if (ret) + enabled = 1; + } + + /* + * Unclone this context if we enabled any event. + */ + if (enabled) + unclone_ctx(ctx); + + raw_spin_unlock(&ctx->lock); + + /* + * Also calls ctxswin for cgroup events, if any: + */ + perf_event_context_sched_in(ctx, ctx->task); +out: + local_irq_restore(flags); +} + +/* + * Cross CPU call to read the hardware event + */ +static void __perf_event_read(void *info) +{ + struct perf_event *event = info; + struct perf_event_context *ctx = event->ctx; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + + /* + * If this is a task context, we need to check whether it is + * the current task context of this cpu. If not it has been + * scheduled out before the smp call arrived. In that case + * event->count would have been updated to a recent sample + * when the event was scheduled out. 
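event_enable_on_exec() above services the attr.enable_on_exec case that tools such as perf stat typically use so counting only starts once the target has exec'ed. A minimal attribute sketch, user-space and illustrative only:

#include <linux/perf_event.h>

/* Illustrative only: open this on the forked child (pid = child, cpu = -1)
 * before it calls exec(); perf_event_enable_on_exec() flips it on at exec
 * time. */
static struct perf_event_attr workload_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_INSTRUCTIONS,
	.size		= sizeof(struct perf_event_attr),
	.disabled	= 1,	/* stay off across fork() and setup ... */
	.enable_on_exec	= 1,	/* ... and start counting at exec() */
};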
+ */ + if (ctx->task && cpuctx->task_ctx != ctx) + return; + + raw_spin_lock(&ctx->lock); + if (ctx->is_active) { + update_context_time(ctx); + update_cgrp_time_from_event(event); + } + update_event_times(event); + if (event->state == PERF_EVENT_STATE_ACTIVE) + event->pmu->read(event); + raw_spin_unlock(&ctx->lock); +} + +static inline u64 perf_event_count(struct perf_event *event) +{ + return local64_read(&event->count) + atomic64_read(&event->child_count); +} + +static u64 perf_event_read(struct perf_event *event) +{ + /* + * If event is enabled and currently active on a CPU, update the + * value in the event structure: + */ + if (event->state == PERF_EVENT_STATE_ACTIVE) { + smp_call_function_single(event->oncpu, + __perf_event_read, event, 1); + } else if (event->state == PERF_EVENT_STATE_INACTIVE) { + struct perf_event_context *ctx = event->ctx; + unsigned long flags; + + raw_spin_lock_irqsave(&ctx->lock, flags); + /* + * may read while context is not active + * (e.g., thread is blocked), in that case + * we cannot update context time + */ + if (ctx->is_active) { + update_context_time(ctx); + update_cgrp_time_from_event(event); + } + update_event_times(event); + raw_spin_unlock_irqrestore(&ctx->lock, flags); + } + + return perf_event_count(event); +} + +/* + * Callchain support + */ + +struct callchain_cpus_entries { + struct rcu_head rcu_head; + struct perf_callchain_entry *cpu_entries[0]; +}; + +static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); +static atomic_t nr_callchain_events; +static DEFINE_MUTEX(callchain_mutex); +struct callchain_cpus_entries *callchain_cpus_entries; + + +__weak void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ +} + +__weak void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) +{ +} + +static void release_callchain_buffers_rcu(struct rcu_head *head) +{ + struct callchain_cpus_entries *entries; + int cpu; + + entries = container_of(head, struct callchain_cpus_entries, rcu_head); + + for_each_possible_cpu(cpu) + kfree(entries->cpu_entries[cpu]); + + kfree(entries); +} + +static void release_callchain_buffers(void) +{ + struct callchain_cpus_entries *entries; + + entries = callchain_cpus_entries; + rcu_assign_pointer(callchain_cpus_entries, NULL); + call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); +} + +static int alloc_callchain_buffers(void) +{ + int cpu; + int size; + struct callchain_cpus_entries *entries; + + /* + * We can't use the percpu allocation API for data that can be + * accessed from NMI. Use a temporary manual per cpu allocation + * until that gets sorted out. 
+ */ + size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); + + entries = kzalloc(size, GFP_KERNEL); + if (!entries) + return -ENOMEM; + + size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; + + for_each_possible_cpu(cpu) { + entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, + cpu_to_node(cpu)); + if (!entries->cpu_entries[cpu]) + goto fail; + } + + rcu_assign_pointer(callchain_cpus_entries, entries); + + return 0; + +fail: + for_each_possible_cpu(cpu) + kfree(entries->cpu_entries[cpu]); + kfree(entries); + + return -ENOMEM; +} + +static int get_callchain_buffers(void) +{ + int err = 0; + int count; + + mutex_lock(&callchain_mutex); + + count = atomic_inc_return(&nr_callchain_events); + if (WARN_ON_ONCE(count < 1)) { + err = -EINVAL; + goto exit; + } + + if (count > 1) { + /* If the allocation failed, give up */ + if (!callchain_cpus_entries) + err = -ENOMEM; + goto exit; + } + + err = alloc_callchain_buffers(); + if (err) + release_callchain_buffers(); +exit: + mutex_unlock(&callchain_mutex); + + return err; +} + +static void put_callchain_buffers(void) +{ + if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) { + release_callchain_buffers(); + mutex_unlock(&callchain_mutex); + } +} + +static int get_recursion_context(int *recursion) +{ + int rctx; + + if (in_nmi()) + rctx = 3; + else if (in_irq()) + rctx = 2; + else if (in_softirq()) + rctx = 1; + else + rctx = 0; + + if (recursion[rctx]) + return -1; + + recursion[rctx]++; + barrier(); + + return rctx; +} + +static inline void put_recursion_context(int *recursion, int rctx) +{ + barrier(); + recursion[rctx]--; +} + +static struct perf_callchain_entry *get_callchain_entry(int *rctx) +{ + int cpu; + struct callchain_cpus_entries *entries; + + *rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); + if (*rctx == -1) + return NULL; + + entries = rcu_dereference(callchain_cpus_entries); + if (!entries) + return NULL; + + cpu = smp_processor_id(); + + return &entries->cpu_entries[cpu][*rctx]; +} + +static void +put_callchain_entry(int rctx) +{ + put_recursion_context(__get_cpu_var(callchain_recursion), rctx); +} + +static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) +{ + int rctx; + struct perf_callchain_entry *entry; + + + entry = get_callchain_entry(&rctx); + if (rctx == -1) + return NULL; + + if (!entry) + goto exit_put; + + entry->nr = 0; + + if (!user_mode(regs)) { + perf_callchain_store(entry, PERF_CONTEXT_KERNEL); + perf_callchain_kernel(entry, regs); + if (current->mm) + regs = task_pt_regs(current); + else + regs = NULL; + } + + if (regs) { + perf_callchain_store(entry, PERF_CONTEXT_USER); + perf_callchain_user(entry, regs); + } + +exit_put: + put_callchain_entry(rctx); + + return entry; +} + +/* + * Initialize the perf_event context in a task_struct: + */ +static void __perf_event_init_context(struct perf_event_context *ctx) +{ + raw_spin_lock_init(&ctx->lock); + mutex_init(&ctx->mutex); + INIT_LIST_HEAD(&ctx->pinned_groups); + INIT_LIST_HEAD(&ctx->flexible_groups); + INIT_LIST_HEAD(&ctx->event_list); + atomic_set(&ctx->refcount, 1); +} + +static struct perf_event_context * +alloc_perf_context(struct pmu *pmu, struct task_struct *task) +{ + struct perf_event_context *ctx; + + ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); + if (!ctx) + return NULL; + + __perf_event_init_context(ctx); + if (task) { + ctx->task = task; + get_task_struct(task); + } + ctx->pmu = pmu; + + return ctx; +} + +static struct task_struct * 
+find_lively_task_by_vpid(pid_t vpid) +{ + struct task_struct *task; + int err; + + rcu_read_lock(); + if (!vpid) + task = current; + else + task = find_task_by_vpid(vpid); + if (task) + get_task_struct(task); + rcu_read_unlock(); + + if (!task) + return ERR_PTR(-ESRCH); + + /* Reuse ptrace permission checks for now. */ + err = -EACCES; + if (!ptrace_may_access(task, PTRACE_MODE_READ)) + goto errout; + + return task; +errout: + put_task_struct(task); + return ERR_PTR(err); + +} + +/* + * Returns a matching context with refcount and pincount. + */ +static struct perf_event_context * +find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) +{ + struct perf_event_context *ctx; + struct perf_cpu_context *cpuctx; + unsigned long flags; + int ctxn, err; + + if (!task) { + /* Must be root to operate on a CPU event: */ + if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EACCES); + + /* + * We could be clever and allow to attach a event to an + * offline CPU and activate it when the CPU comes up, but + * that's for later. + */ + if (!cpu_online(cpu)) + return ERR_PTR(-ENODEV); + + cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); + ctx = &cpuctx->ctx; + get_ctx(ctx); + ++ctx->pin_count; + + return ctx; + } + + err = -EINVAL; + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + goto errout; + +retry: + ctx = perf_lock_task_context(task, ctxn, &flags); + if (ctx) { + unclone_ctx(ctx); + ++ctx->pin_count; + raw_spin_unlock_irqrestore(&ctx->lock, flags); + } + + if (!ctx) { + ctx = alloc_perf_context(pmu, task); + err = -ENOMEM; + if (!ctx) + goto errout; + + get_ctx(ctx); + + err = 0; + mutex_lock(&task->perf_event_mutex); + /* + * If it has already passed perf_event_exit_task(). + * we must see PF_EXITING, it takes this mutex too. + */ + if (task->flags & PF_EXITING) + err = -ESRCH; + else if (task->perf_event_ctxp[ctxn]) + err = -EAGAIN; + else { + ++ctx->pin_count; + rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); + } + mutex_unlock(&task->perf_event_mutex); + + if (unlikely(err)) { + put_task_struct(task); + kfree(ctx); + + if (err == -EAGAIN) + goto retry; + goto errout; + } + } + + return ctx; + +errout: + return ERR_PTR(err); +} + +static void perf_event_free_filter(struct perf_event *event); + +static void free_event_rcu(struct rcu_head *head) +{ + struct perf_event *event; + + event = container_of(head, struct perf_event, rcu_head); + if (event->ns) + put_pid_ns(event->ns); + perf_event_free_filter(event); + kfree(event); +} + +static void perf_buffer_put(struct perf_buffer *buffer); + +static void free_event(struct perf_event *event) +{ + irq_work_sync(&event->pending); + + if (!event->parent) { + if (event->attach_state & PERF_ATTACH_TASK) + jump_label_dec(&perf_sched_events); + if (event->attr.mmap || event->attr.mmap_data) + atomic_dec(&nr_mmap_events); + if (event->attr.comm) + atomic_dec(&nr_comm_events); + if (event->attr.task) + atomic_dec(&nr_task_events); + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) + put_callchain_buffers(); + if (is_cgroup_event(event)) { + atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); + jump_label_dec(&perf_sched_events); + } + } + + if (event->buffer) { + perf_buffer_put(event->buffer); + event->buffer = NULL; + } + + if (is_cgroup_event(event)) + perf_detach_cgroup(event); + + if (event->destroy) + event->destroy(event); + + if (event->ctx) + put_ctx(event->ctx); + + call_rcu(&event->rcu_head, free_event_rcu); +} + +int perf_event_release_kernel(struct perf_event *event) +{ + struct perf_event_context *ctx = 
event->ctx; + + /* + * Remove from the PMU, can't get re-enabled since we got + * here because the last ref went. + */ + perf_event_disable(event); + + WARN_ON_ONCE(ctx->parent_ctx); + /* + * There are two ways this annotation is useful: + * + * 1) there is a lock recursion from perf_event_exit_task + * see the comment there. + * + * 2) there is a lock-inversion with mmap_sem through + * perf_event_read_group(), which takes faults while + * holding ctx->mutex, however this is called after + * the last filedesc died, so there is no possibility + * to trigger the AB-BA case. + */ + mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); + raw_spin_lock_irq(&ctx->lock); + perf_group_detach(event); + list_del_event(event, ctx); + raw_spin_unlock_irq(&ctx->lock); + mutex_unlock(&ctx->mutex); + + free_event(event); + + return 0; +} +EXPORT_SYMBOL_GPL(perf_event_release_kernel); + +/* + * Called when the last reference to the file is gone. + */ +static int perf_release(struct inode *inode, struct file *file) +{ + struct perf_event *event = file->private_data; + struct task_struct *owner; + + file->private_data = NULL; + + rcu_read_lock(); + owner = ACCESS_ONCE(event->owner); + /* + * Matches the smp_wmb() in perf_event_exit_task(). If we observe + * !owner it means the list deletion is complete and we can indeed + * free this event, otherwise we need to serialize on + * owner->perf_event_mutex. + */ + smp_read_barrier_depends(); + if (owner) { + /* + * Since delayed_put_task_struct() also drops the last + * task reference we can safely take a new reference + * while holding the rcu_read_lock(). + */ + get_task_struct(owner); + } + rcu_read_unlock(); + + if (owner) { + mutex_lock(&owner->perf_event_mutex); + /* + * We have to re-check the event->owner field, if it is cleared + * we raced with perf_event_exit_task(), acquiring the mutex + * ensured they're done, and we can proceed with freeing the + * event. 
+ */ + if (event->owner) + list_del_init(&event->owner_entry); + mutex_unlock(&owner->perf_event_mutex); + put_task_struct(owner); + } + + return perf_event_release_kernel(event); +} + +u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) +{ + struct perf_event *child; + u64 total = 0; + + *enabled = 0; + *running = 0; + + mutex_lock(&event->child_mutex); + total += perf_event_read(event); + *enabled += event->total_time_enabled + + atomic64_read(&event->child_total_time_enabled); + *running += event->total_time_running + + atomic64_read(&event->child_total_time_running); + + list_for_each_entry(child, &event->child_list, child_list) { + total += perf_event_read(child); + *enabled += child->total_time_enabled; + *running += child->total_time_running; + } + mutex_unlock(&event->child_mutex); + + return total; +} +EXPORT_SYMBOL_GPL(perf_event_read_value); + +static int perf_event_read_group(struct perf_event *event, + u64 read_format, char __user *buf) +{ + struct perf_event *leader = event->group_leader, *sub; + int n = 0, size = 0, ret = -EFAULT; + struct perf_event_context *ctx = leader->ctx; + u64 values[5]; + u64 count, enabled, running; + + mutex_lock(&ctx->mutex); + count = perf_event_read_value(leader, &enabled, &running); + + values[n++] = 1 + leader->nr_siblings; + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + values[n++] = enabled; + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + values[n++] = running; + values[n++] = count; + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(leader); + + size = n * sizeof(u64); + + if (copy_to_user(buf, values, size)) + goto unlock; + + ret = size; + + list_for_each_entry(sub, &leader->sibling_list, group_entry) { + n = 0; + + values[n++] = perf_event_read_value(sub, &enabled, &running); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(sub); + + size = n * sizeof(u64); + + if (copy_to_user(buf + ret, values, size)) { + ret = -EFAULT; + goto unlock; + } + + ret += size; + } +unlock: + mutex_unlock(&ctx->mutex); + + return ret; +} + +static int perf_event_read_one(struct perf_event *event, + u64 read_format, char __user *buf) +{ + u64 enabled, running; + u64 values[4]; + int n = 0; + + values[n++] = perf_event_read_value(event, &enabled, &running); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + values[n++] = enabled; + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + values[n++] = running; + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(event); + + if (copy_to_user(buf, values, n * sizeof(u64))) + return -EFAULT; + + return n * sizeof(u64); +} + +/* + * Read the performance event - simple non blocking version for now + */ +static ssize_t +perf_read_hw(struct perf_event *event, char __user *buf, size_t count) +{ + u64 read_format = event->attr.read_format; + int ret; + + /* + * Return end-of-file for a read on a event that is in + * error state (i.e. because it was pinned but it couldn't be + * scheduled on to the CPU at some point). 
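perf_event_read_group() above lays the PERF_FORMAT_GROUP reply out as { nr, time_enabled, time_running, { value, id } * nr } when both time fields and PERF_FORMAT_ID are requested. A user-space sketch that parses that layout; fd is assumed to be a group leader opened with exactly that read_format:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Matches the buffer built by perf_event_read_group() for
 * PERF_FORMAT_GROUP | PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID. */
static int dump_group(int fd)
{
	uint64_t buf[64];	/* enough for a small demo group */
	ssize_t n = read(fd, buf, sizeof(buf));
	uint64_t nr, i;

	if (n <= 0)
		return -1;

	nr = buf[0];
	if ((3 + 2 * nr) * sizeof(uint64_t) > (uint64_t)n)
		return -1;	/* short read or unexpected layout */

	printf("members=%llu enabled=%llu running=%llu\n",
	       (unsigned long long)nr,
	       (unsigned long long)buf[1], (unsigned long long)buf[2]);

	for (i = 0; i < nr; i++)	/* leader first, then siblings */
		printf("  id=%llu value=%llu\n",
		       (unsigned long long)buf[4 + 2 * i],
		       (unsigned long long)buf[3 + 2 * i]);
	return 0;
}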
+ */ + if (event->state == PERF_EVENT_STATE_ERROR) + return 0; + + if (count < event->read_size) + return -ENOSPC; + + WARN_ON_ONCE(event->ctx->parent_ctx); + if (read_format & PERF_FORMAT_GROUP) + ret = perf_event_read_group(event, read_format, buf); + else + ret = perf_event_read_one(event, read_format, buf); + + return ret; +} + +static ssize_t +perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + struct perf_event *event = file->private_data; + + return perf_read_hw(event, buf, count); +} + +static unsigned int perf_poll(struct file *file, poll_table *wait) +{ + struct perf_event *event = file->private_data; + struct perf_buffer *buffer; + unsigned int events = POLL_HUP; + + rcu_read_lock(); + buffer = rcu_dereference(event->buffer); + if (buffer) + events = atomic_xchg(&buffer->poll, 0); + rcu_read_unlock(); + + poll_wait(file, &event->waitq, wait); + + return events; +} + +static void perf_event_reset(struct perf_event *event) +{ + (void)perf_event_read(event); + local64_set(&event->count, 0); + perf_event_update_userpage(event); +} + +/* + * Holding the top-level event's child_mutex means that any + * descendant process that has inherited this event will block + * in sync_child_event if it goes to exit, thus satisfying the + * task existence requirements of perf_event_enable/disable. + */ +static void perf_event_for_each_child(struct perf_event *event, + void (*func)(struct perf_event *)) +{ + struct perf_event *child; + + WARN_ON_ONCE(event->ctx->parent_ctx); + mutex_lock(&event->child_mutex); + func(event); + list_for_each_entry(child, &event->child_list, child_list) + func(child); + mutex_unlock(&event->child_mutex); +} + +static void perf_event_for_each(struct perf_event *event, + void (*func)(struct perf_event *)) +{ + struct perf_event_context *ctx = event->ctx; + struct perf_event *sibling; + + WARN_ON_ONCE(ctx->parent_ctx); + mutex_lock(&ctx->mutex); + event = event->group_leader; + + perf_event_for_each_child(event, func); + func(event); + list_for_each_entry(sibling, &event->sibling_list, group_entry) + perf_event_for_each_child(event, func); + mutex_unlock(&ctx->mutex); +} + +static int perf_event_period(struct perf_event *event, u64 __user *arg) +{ + struct perf_event_context *ctx = event->ctx; + int ret = 0; + u64 value; + + if (!is_sampling_event(event)) + return -EINVAL; + + if (copy_from_user(&value, arg, sizeof(value))) + return -EFAULT; + + if (!value) + return -EINVAL; + + raw_spin_lock_irq(&ctx->lock); + if (event->attr.freq) { + if (value > sysctl_perf_event_sample_rate) { + ret = -EINVAL; + goto unlock; + } + + event->attr.sample_freq = value; + } else { + event->attr.sample_period = value; + event->hw.sample_period = value; + } +unlock: + raw_spin_unlock_irq(&ctx->lock); + + return ret; +} + +static const struct file_operations perf_fops; + +static struct perf_event *perf_fget_light(int fd, int *fput_needed) +{ + struct file *file; + + file = fget_light(fd, fput_needed); + if (!file) + return ERR_PTR(-EBADF); + + if (file->f_op != &perf_fops) { + fput_light(file, *fput_needed); + *fput_needed = 0; + return ERR_PTR(-EBADF); + } + + return file->private_data; +} + +static int perf_event_set_output(struct perf_event *event, + struct perf_event *output_event); +static int perf_event_set_filter(struct perf_event *event, void __user *arg); + +static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct perf_event *event = file->private_data; + void (*func)(struct perf_event *); + u32 flags = arg; + + switch 
(cmd) { + case PERF_EVENT_IOC_ENABLE: + func = perf_event_enable; + break; + case PERF_EVENT_IOC_DISABLE: + func = perf_event_disable; + break; + case PERF_EVENT_IOC_RESET: + func = perf_event_reset; + break; + + case PERF_EVENT_IOC_REFRESH: + return perf_event_refresh(event, arg); + + case PERF_EVENT_IOC_PERIOD: + return perf_event_period(event, (u64 __user *)arg); + + case PERF_EVENT_IOC_SET_OUTPUT: + { + struct perf_event *output_event = NULL; + int fput_needed = 0; + int ret; + + if (arg != -1) { + output_event = perf_fget_light(arg, &fput_needed); + if (IS_ERR(output_event)) + return PTR_ERR(output_event); + } + + ret = perf_event_set_output(event, output_event); + if (output_event) + fput_light(output_event->filp, fput_needed); + + return ret; + } + + case PERF_EVENT_IOC_SET_FILTER: + return perf_event_set_filter(event, (void __user *)arg); + + default: + return -ENOTTY; + } + + if (flags & PERF_IOC_FLAG_GROUP) + perf_event_for_each(event, func); + else + perf_event_for_each_child(event, func); + + return 0; +} + +int perf_event_task_enable(void) +{ + struct perf_event *event; + + mutex_lock(¤t->perf_event_mutex); + list_for_each_entry(event, ¤t->perf_event_list, owner_entry) + perf_event_for_each_child(event, perf_event_enable); + mutex_unlock(¤t->perf_event_mutex); + + return 0; +} + +int perf_event_task_disable(void) +{ + struct perf_event *event; + + mutex_lock(¤t->perf_event_mutex); + list_for_each_entry(event, ¤t->perf_event_list, owner_entry) + perf_event_for_each_child(event, perf_event_disable); + mutex_unlock(¤t->perf_event_mutex); + + return 0; +} + +#ifndef PERF_EVENT_INDEX_OFFSET +# define PERF_EVENT_INDEX_OFFSET 0 +#endif + +static int perf_event_index(struct perf_event *event) +{ + if (event->hw.state & PERF_HES_STOPPED) + return 0; + + if (event->state != PERF_EVENT_STATE_ACTIVE) + return 0; + + return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; +} + +/* + * Callers need to ensure there can be no nesting of this function, otherwise + * the seqlock logic goes bad. We can not serialize this because the arch + * code calls this from NMI context. + */ +void perf_event_update_userpage(struct perf_event *event) +{ + struct perf_event_mmap_page *userpg; + struct perf_buffer *buffer; + + rcu_read_lock(); + buffer = rcu_dereference(event->buffer); + if (!buffer) + goto unlock; + + userpg = buffer->user_page; + + /* + * Disable preemption so as to not let the corresponding user-space + * spin too long if we get preempted. + */ + preempt_disable(); + ++userpg->lock; + barrier(); + userpg->index = perf_event_index(event); + userpg->offset = perf_event_count(event); + if (event->state == PERF_EVENT_STATE_ACTIVE) + userpg->offset -= local64_read(&event->hw.prev_count); + + userpg->time_enabled = event->total_time_enabled + + atomic64_read(&event->child_total_time_enabled); + + userpg->time_running = event->total_time_running + + atomic64_read(&event->child_total_time_running); + + barrier(); + ++userpg->lock; + preempt_enable(); +unlock: + rcu_read_unlock(); +} + +static unsigned long perf_data_size(struct perf_buffer *buffer); + +static void +perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags) +{ + long max_size = perf_data_size(buffer); + + if (watermark) + buffer->watermark = min(max_size, watermark); + + if (!buffer->watermark) + buffer->watermark = max_size / 2; + + if (flags & PERF_BUFFER_WRITABLE) + buffer->writable = 1; + + atomic_set(&buffer->refcount, 1); +} + +#ifndef CONFIG_PERF_USE_VMALLOC + +/* + * Back perf_mmap() with regular GFP_KERNEL-0 pages. 
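perf_event_update_userpage() above bumps userpg->lock around its updates, so a user-space reader of the mmap'ed control page has to retry until it observes the same lock value before and after its reads, the usual seqcount pattern. A sketch of the reader side, where pc is assumed to point at the first mmap'ed page of a perf fd and rmb() is a good-enough stand-in barrier:

#include <stdint.h>
#include <linux/perf_event.h>

#define rmb()	__sync_synchronize()	/* full barrier; fine for the sketch */

/* Reader for the self-monitoring page written by perf_event_update_userpage(). */
static void read_userpage(volatile struct perf_event_mmap_page *pc,
			  uint32_t *index, int64_t *offset)
{
	uint32_t seq;

	do {
		seq = pc->lock;
		rmb();
		*index = pc->index;
		*offset = pc->offset;
		rmb();
	} while (pc->lock != seq);
}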
+ */ + +static struct page * +perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) +{ + if (pgoff > buffer->nr_pages) + return NULL; + + if (pgoff == 0) + return virt_to_page(buffer->user_page); + + return virt_to_page(buffer->data_pages[pgoff - 1]); +} + +static void *perf_mmap_alloc_page(int cpu) +{ + struct page *page; + int node; + + node = (cpu == -1) ? cpu : cpu_to_node(cpu); + page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); + if (!page) + return NULL; + + return page_address(page); +} + +static struct perf_buffer * +perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) +{ + struct perf_buffer *buffer; + unsigned long size; + int i; + + size = sizeof(struct perf_buffer); + size += nr_pages * sizeof(void *); + + buffer = kzalloc(size, GFP_KERNEL); + if (!buffer) + goto fail; + + buffer->user_page = perf_mmap_alloc_page(cpu); + if (!buffer->user_page) + goto fail_user_page; + + for (i = 0; i < nr_pages; i++) { + buffer->data_pages[i] = perf_mmap_alloc_page(cpu); + if (!buffer->data_pages[i]) + goto fail_data_pages; + } + + buffer->nr_pages = nr_pages; + + perf_buffer_init(buffer, watermark, flags); + + return buffer; + +fail_data_pages: + for (i--; i >= 0; i--) + free_page((unsigned long)buffer->data_pages[i]); + + free_page((unsigned long)buffer->user_page); + +fail_user_page: + kfree(buffer); + +fail: + return NULL; +} + +static void perf_mmap_free_page(unsigned long addr) +{ + struct page *page = virt_to_page((void *)addr); + + page->mapping = NULL; + __free_page(page); +} + +static void perf_buffer_free(struct perf_buffer *buffer) +{ + int i; + + perf_mmap_free_page((unsigned long)buffer->user_page); + for (i = 0; i < buffer->nr_pages; i++) + perf_mmap_free_page((unsigned long)buffer->data_pages[i]); + kfree(buffer); +} + +static inline int page_order(struct perf_buffer *buffer) +{ + return 0; +} + +#else + +/* + * Back perf_mmap() with vmalloc memory. + * + * Required for architectures that have d-cache aliasing issues. 
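 *
 * Here the control page and all data pages are carved out of a single
 * vmalloc_user() area, so only one entry is kept in ->data_pages[] and
 * ->nr_pages stays 1 while ->page_order carries the real size.  For
 * example, a request for 4 data pages allocates one 5-page area, sets
 * page_order = ilog2(4) = 2, and perf_data_size() still reports
 * 1 << (PAGE_SHIFT + 2), i.e. the same 4 pages of data as the
 * page-by-page allocator above.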
+ */ + +static inline int page_order(struct perf_buffer *buffer) +{ + return buffer->page_order; +} + +static struct page * +perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) +{ + if (pgoff > (1UL << page_order(buffer))) + return NULL; + + return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE); +} + +static void perf_mmap_unmark_page(void *addr) +{ + struct page *page = vmalloc_to_page(addr); + + page->mapping = NULL; +} + +static void perf_buffer_free_work(struct work_struct *work) +{ + struct perf_buffer *buffer; + void *base; + int i, nr; + + buffer = container_of(work, struct perf_buffer, work); + nr = 1 << page_order(buffer); + + base = buffer->user_page; + for (i = 0; i < nr + 1; i++) + perf_mmap_unmark_page(base + (i * PAGE_SIZE)); + + vfree(base); + kfree(buffer); +} + +static void perf_buffer_free(struct perf_buffer *buffer) +{ + schedule_work(&buffer->work); +} + +static struct perf_buffer * +perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) +{ + struct perf_buffer *buffer; + unsigned long size; + void *all_buf; + + size = sizeof(struct perf_buffer); + size += sizeof(void *); + + buffer = kzalloc(size, GFP_KERNEL); + if (!buffer) + goto fail; + + INIT_WORK(&buffer->work, perf_buffer_free_work); + + all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); + if (!all_buf) + goto fail_all_buf; + + buffer->user_page = all_buf; + buffer->data_pages[0] = all_buf + PAGE_SIZE; + buffer->page_order = ilog2(nr_pages); + buffer->nr_pages = 1; + + perf_buffer_init(buffer, watermark, flags); + + return buffer; + +fail_all_buf: + kfree(buffer); + +fail: + return NULL; +} + +#endif + +static unsigned long perf_data_size(struct perf_buffer *buffer) +{ + return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer)); +} + +static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct perf_event *event = vma->vm_file->private_data; + struct perf_buffer *buffer; + int ret = VM_FAULT_SIGBUS; + + if (vmf->flags & FAULT_FLAG_MKWRITE) { + if (vmf->pgoff == 0) + ret = 0; + return ret; + } + + rcu_read_lock(); + buffer = rcu_dereference(event->buffer); + if (!buffer) + goto unlock; + + if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) + goto unlock; + + vmf->page = perf_mmap_to_page(buffer, vmf->pgoff); + if (!vmf->page) + goto unlock; + + get_page(vmf->page); + vmf->page->mapping = vma->vm_file->f_mapping; + vmf->page->index = vmf->pgoff; + + ret = 0; +unlock: + rcu_read_unlock(); + + return ret; +} + +static void perf_buffer_free_rcu(struct rcu_head *rcu_head) +{ + struct perf_buffer *buffer; + + buffer = container_of(rcu_head, struct perf_buffer, rcu_head); + perf_buffer_free(buffer); +} + +static struct perf_buffer *perf_buffer_get(struct perf_event *event) +{ + struct perf_buffer *buffer; + + rcu_read_lock(); + buffer = rcu_dereference(event->buffer); + if (buffer) { + if (!atomic_inc_not_zero(&buffer->refcount)) + buffer = NULL; + } + rcu_read_unlock(); + + return buffer; +} + +static void perf_buffer_put(struct perf_buffer *buffer) +{ + if (!atomic_dec_and_test(&buffer->refcount)) + return; + + call_rcu(&buffer->rcu_head, perf_buffer_free_rcu); +} + +static void perf_mmap_open(struct vm_area_struct *vma) +{ + struct perf_event *event = vma->vm_file->private_data; + + atomic_inc(&event->mmap_count); +} + +static void perf_mmap_close(struct vm_area_struct *vma) +{ + struct perf_event *event = vma->vm_file->private_data; + + if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { + unsigned long size = 
perf_data_size(event->buffer); + struct user_struct *user = event->mmap_user; + struct perf_buffer *buffer = event->buffer; + + atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); + vma->vm_mm->locked_vm -= event->mmap_locked; + rcu_assign_pointer(event->buffer, NULL); + mutex_unlock(&event->mmap_mutex); + + perf_buffer_put(buffer); + free_uid(user); + } +} + +static const struct vm_operations_struct perf_mmap_vmops = { + .open = perf_mmap_open, + .close = perf_mmap_close, + .fault = perf_mmap_fault, + .page_mkwrite = perf_mmap_fault, +}; + +static int perf_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct perf_event *event = file->private_data; + unsigned long user_locked, user_lock_limit; + struct user_struct *user = current_user(); + unsigned long locked, lock_limit; + struct perf_buffer *buffer; + unsigned long vma_size; + unsigned long nr_pages; + long user_extra, extra; + int ret = 0, flags = 0; + + /* + * Don't allow mmap() of inherited per-task counters. This would + * create a performance issue due to all children writing to the + * same buffer. + */ + if (event->cpu == -1 && event->attr.inherit) + return -EINVAL; + + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vma_size = vma->vm_end - vma->vm_start; + nr_pages = (vma_size / PAGE_SIZE) - 1; + + /* + * If we have buffer pages ensure they're a power-of-two number, so we + * can do bitmasks instead of modulo. + */ + if (nr_pages != 0 && !is_power_of_2(nr_pages)) + return -EINVAL; + + if (vma_size != PAGE_SIZE * (1 + nr_pages)) + return -EINVAL; + + if (vma->vm_pgoff != 0) + return -EINVAL; + + WARN_ON_ONCE(event->ctx->parent_ctx); + mutex_lock(&event->mmap_mutex); + if (event->buffer) { + if (event->buffer->nr_pages == nr_pages) + atomic_inc(&event->buffer->refcount); + else + ret = -EINVAL; + goto unlock; + } + + user_extra = nr_pages + 1; + user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); + + /* + * Increase the limit linearly with more CPUs: + */ + user_lock_limit *= num_online_cpus(); + + user_locked = atomic_long_read(&user->locked_vm) + user_extra; + + extra = 0; + if (user_locked > user_lock_limit) + extra = user_locked - user_lock_limit; + + lock_limit = rlimit(RLIMIT_MEMLOCK); + lock_limit >>= PAGE_SHIFT; + locked = vma->vm_mm->locked_vm + extra; + + if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && + !capable(CAP_IPC_LOCK)) { + ret = -EPERM; + goto unlock; + } + + WARN_ON(event->buffer); + + if (vma->vm_flags & VM_WRITE) + flags |= PERF_BUFFER_WRITABLE; + + buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark, + event->cpu, flags); + if (!buffer) { + ret = -ENOMEM; + goto unlock; + } + rcu_assign_pointer(event->buffer, buffer); + + atomic_long_add(user_extra, &user->locked_vm); + event->mmap_locked = extra; + event->mmap_user = get_current_user(); + vma->vm_mm->locked_vm += event->mmap_locked; + +unlock: + if (!ret) + atomic_inc(&event->mmap_count); + mutex_unlock(&event->mmap_mutex); + + vma->vm_flags |= VM_RESERVED; + vma->vm_ops = &perf_mmap_vmops; + + return ret; +} + +static int perf_fasync(int fd, struct file *filp, int on) +{ + struct inode *inode = filp->f_path.dentry->d_inode; + struct perf_event *event = filp->private_data; + int retval; + + mutex_lock(&inode->i_mutex); + retval = fasync_helper(fd, filp, on, &event->fasync); + mutex_unlock(&inode->i_mutex); + + if (retval < 0) + return retval; + + return 0; +} + +static const struct file_operations perf_fops = { + .llseek = no_llseek, + .release = perf_release, + .read = perf_read, + .poll 
= perf_poll, + .unlocked_ioctl = perf_ioctl, + .compat_ioctl = perf_ioctl, + .mmap = perf_mmap, + .fasync = perf_fasync, +}; + +/* + * Perf event wakeup + * + * If there's data, ensure we set the poll() state and publish everything + * to user-space before waking everybody up. + */ + +void perf_event_wakeup(struct perf_event *event) +{ + wake_up_all(&event->waitq); + + if (event->pending_kill) { + kill_fasync(&event->fasync, SIGIO, event->pending_kill); + event->pending_kill = 0; + } +} + +static void perf_pending_event(struct irq_work *entry) +{ + struct perf_event *event = container_of(entry, + struct perf_event, pending); + + if (event->pending_disable) { + event->pending_disable = 0; + __perf_event_disable(event); + } + + if (event->pending_wakeup) { + event->pending_wakeup = 0; + perf_event_wakeup(event); + } +} + +/* + * We assume there is only KVM supporting the callbacks. + * Later on, we might change it to a list if there is + * another virtualization implementation supporting the callbacks. + */ +struct perf_guest_info_callbacks *perf_guest_cbs; + +int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) +{ + perf_guest_cbs = cbs; + return 0; +} +EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); + +int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) +{ + perf_guest_cbs = NULL; + return 0; +} +EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); + +/* + * Output + */ +static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail, + unsigned long offset, unsigned long head) +{ + unsigned long mask; + + if (!buffer->writable) + return true; + + mask = perf_data_size(buffer) - 1; + + offset = (offset - tail) & mask; + head = (head - tail) & mask; + + if ((int)(head - offset) < 0) + return false; + + return true; +} + +static void perf_output_wakeup(struct perf_output_handle *handle) +{ + atomic_set(&handle->buffer->poll, POLL_IN); + + if (handle->nmi) { + handle->event->pending_wakeup = 1; + irq_work_queue(&handle->event->pending); + } else + perf_event_wakeup(handle->event); +} + +/* + * We need to ensure a later event_id doesn't publish a head when a former + * event isn't done writing. However since we need to deal with NMIs we + * cannot fully serialize things. + * + * We only publish the head (and generate a wakeup) when the outer-most + * event completes. + */ +static void perf_output_get_handle(struct perf_output_handle *handle) +{ + struct perf_buffer *buffer = handle->buffer; + + preempt_disable(); + local_inc(&buffer->nest); + handle->wakeup = local_read(&buffer->wakeup); +} + +static void perf_output_put_handle(struct perf_output_handle *handle) +{ + struct perf_buffer *buffer = handle->buffer; + unsigned long head; + +again: + head = local_read(&buffer->head); + + /* + * IRQ/NMI can happen here, which means we can miss a head update. + */ + + if (!local_dec_and_test(&buffer->nest)) + goto out; + + /* + * Publish the known good head. Rely on the full barrier implied + * by atomic_dec_and_test() order the buffer->head read and this + * write. + */ + buffer->user_page->data_head = head; + + /* + * Now check if we missed an update, rely on the (compiler) + * barrier in atomic_dec_and_test() to re-read buffer->head. 
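 *
 * The user-space consumer pairs with this publish by reading data_head,
 * issuing a read barrier, consuming records in [data_tail, data_head) and
 * only then storing the new data_tail.  A minimal sketch (rmb()/mb() stand
 * for the usual barriers; data points at the first data page, size is the
 * power-of-two data size, handle_record() is an assumed helper, and a
 * record wrapping around the end of the buffer is not handled):
 *
 *	__u64 head = pc->data_head;
 *	__u64 tail = pc->data_tail;
 *
 *	rmb();
 *	while (tail < head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = (void *)(data + (tail & (size - 1)));
 *		handle_record(hdr);
 *		tail += hdr->size;
 *	}
 *	mb();
 *	pc->data_tail = tail;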
+ */ + if (unlikely(head != local_read(&buffer->head))) { + local_inc(&buffer->nest); + goto again; + } + + if (handle->wakeup != local_read(&buffer->wakeup)) + perf_output_wakeup(handle); + +out: + preempt_enable(); +} + +__always_inline void perf_output_copy(struct perf_output_handle *handle, + const void *buf, unsigned int len) +{ + do { + unsigned long size = min_t(unsigned long, handle->size, len); + + memcpy(handle->addr, buf, size); + + len -= size; + handle->addr += size; + buf += size; + handle->size -= size; + if (!handle->size) { + struct perf_buffer *buffer = handle->buffer; + + handle->page++; + handle->page &= buffer->nr_pages - 1; + handle->addr = buffer->data_pages[handle->page]; + handle->size = PAGE_SIZE << page_order(buffer); + } + } while (len); +} + +static void __perf_event_header__init_id(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event) +{ + u64 sample_type = event->attr.sample_type; + + data->type = sample_type; + header->size += event->id_header_size; + + if (sample_type & PERF_SAMPLE_TID) { + /* namespace issues */ + data->tid_entry.pid = perf_event_pid(event, current); + data->tid_entry.tid = perf_event_tid(event, current); + } + + if (sample_type & PERF_SAMPLE_TIME) + data->time = perf_clock(); + + if (sample_type & PERF_SAMPLE_ID) + data->id = primary_event_id(event); + + if (sample_type & PERF_SAMPLE_STREAM_ID) + data->stream_id = event->id; + + if (sample_type & PERF_SAMPLE_CPU) { + data->cpu_entry.cpu = raw_smp_processor_id(); + data->cpu_entry.reserved = 0; + } +} + +static void perf_event_header__init_id(struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event) +{ + if (event->attr.sample_id_all) + __perf_event_header__init_id(header, data, event); +} + +static void __perf_event__output_id_sample(struct perf_output_handle *handle, + struct perf_sample_data *data) +{ + u64 sample_type = data->type; + + if (sample_type & PERF_SAMPLE_TID) + perf_output_put(handle, data->tid_entry); + + if (sample_type & PERF_SAMPLE_TIME) + perf_output_put(handle, data->time); + + if (sample_type & PERF_SAMPLE_ID) + perf_output_put(handle, data->id); + + if (sample_type & PERF_SAMPLE_STREAM_ID) + perf_output_put(handle, data->stream_id); + + if (sample_type & PERF_SAMPLE_CPU) + perf_output_put(handle, data->cpu_entry); +} + +static void perf_event__output_id_sample(struct perf_event *event, + struct perf_output_handle *handle, + struct perf_sample_data *sample) +{ + if (event->attr.sample_id_all) + __perf_event__output_id_sample(handle, sample); +} + +int perf_output_begin(struct perf_output_handle *handle, + struct perf_event *event, unsigned int size, + int nmi, int sample) +{ + struct perf_buffer *buffer; + unsigned long tail, offset, head; + int have_lost; + struct perf_sample_data sample_data; + struct { + struct perf_event_header header; + u64 id; + u64 lost; + } lost_event; + + rcu_read_lock(); + /* + * For inherited events we send all the output towards the parent. 
+ */ + if (event->parent) + event = event->parent; + + buffer = rcu_dereference(event->buffer); + if (!buffer) + goto out; + + handle->buffer = buffer; + handle->event = event; + handle->nmi = nmi; + handle->sample = sample; + + if (!buffer->nr_pages) + goto out; + + have_lost = local_read(&buffer->lost); + if (have_lost) { + lost_event.header.size = sizeof(lost_event); + perf_event_header__init_id(&lost_event.header, &sample_data, + event); + size += lost_event.header.size; + } + + perf_output_get_handle(handle); + + do { + /* + * Userspace could choose to issue a mb() before updating the + * tail pointer. So that all reads will be completed before the + * write is issued. + */ + tail = ACCESS_ONCE(buffer->user_page->data_tail); + smp_rmb(); + offset = head = local_read(&buffer->head); + head += size; + if (unlikely(!perf_output_space(buffer, tail, offset, head))) + goto fail; + } while (local_cmpxchg(&buffer->head, offset, head) != offset); + + if (head - local_read(&buffer->wakeup) > buffer->watermark) + local_add(buffer->watermark, &buffer->wakeup); + + handle->page = offset >> (PAGE_SHIFT + page_order(buffer)); + handle->page &= buffer->nr_pages - 1; + handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1); + handle->addr = buffer->data_pages[handle->page]; + handle->addr += handle->size; + handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size; + + if (have_lost) { + lost_event.header.type = PERF_RECORD_LOST; + lost_event.header.misc = 0; + lost_event.id = event->id; + lost_event.lost = local_xchg(&buffer->lost, 0); + + perf_output_put(handle, lost_event); + perf_event__output_id_sample(event, handle, &sample_data); + } + + return 0; + +fail: + local_inc(&buffer->lost); + perf_output_put_handle(handle); +out: + rcu_read_unlock(); + + return -ENOSPC; +} + +void perf_output_end(struct perf_output_handle *handle) +{ + struct perf_event *event = handle->event; + struct perf_buffer *buffer = handle->buffer; + + int wakeup_events = event->attr.wakeup_events; + + if (handle->sample && wakeup_events) { + int events = local_inc_return(&buffer->events); + if (events >= wakeup_events) { + local_sub(wakeup_events, &buffer->events); + local_inc(&buffer->wakeup); + } + } + + perf_output_put_handle(handle); + rcu_read_unlock(); +} + +static void perf_output_read_one(struct perf_output_handle *handle, + struct perf_event *event, + u64 enabled, u64 running) +{ + u64 read_format = event->attr.read_format; + u64 values[4]; + int n = 0; + + values[n++] = perf_event_count(event); + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { + values[n++] = enabled + + atomic64_read(&event->child_total_time_enabled); + } + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { + values[n++] = running + + atomic64_read(&event->child_total_time_running); + } + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(event); + + perf_output_copy(handle, values, n * sizeof(u64)); +} + +/* + * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 
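 *
 * For reference, when PERF_FORMAT_TOTAL_TIME_ENABLED,
 * PERF_FORMAT_TOTAL_TIME_RUNNING and PERF_FORMAT_ID are all requested,
 * perf_output_read_one() above emits its values in this order (sketch of
 * the non-group layout only):
 *
 *	struct read_format {
 *		__u64 value;
 *		__u64 time_enabled;
 *		__u64 time_running;
 *		__u64 id;
 *	};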
+ */ +static void perf_output_read_group(struct perf_output_handle *handle, + struct perf_event *event, + u64 enabled, u64 running) +{ + struct perf_event *leader = event->group_leader, *sub; + u64 read_format = event->attr.read_format; + u64 values[5]; + int n = 0; + + values[n++] = 1 + leader->nr_siblings; + + if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) + values[n++] = enabled; + + if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) + values[n++] = running; + + if (leader != event) + leader->pmu->read(leader); + + values[n++] = perf_event_count(leader); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(leader); + + perf_output_copy(handle, values, n * sizeof(u64)); + + list_for_each_entry(sub, &leader->sibling_list, group_entry) { + n = 0; + + if (sub != event) + sub->pmu->read(sub); + + values[n++] = perf_event_count(sub); + if (read_format & PERF_FORMAT_ID) + values[n++] = primary_event_id(sub); + + perf_output_copy(handle, values, n * sizeof(u64)); + } +} + +#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ + PERF_FORMAT_TOTAL_TIME_RUNNING) + +static void perf_output_read(struct perf_output_handle *handle, + struct perf_event *event) +{ + u64 enabled = 0, running = 0, now, ctx_time; + u64 read_format = event->attr.read_format; + + /* + * compute total_time_enabled, total_time_running + * based on snapshot values taken when the event + * was last scheduled in. + * + * we cannot simply called update_context_time() + * because of locking issue as we are called in + * NMI context + */ + if (read_format & PERF_FORMAT_TOTAL_TIMES) { + now = perf_clock(); + ctx_time = event->shadow_ctx_time + now; + enabled = ctx_time - event->tstamp_enabled; + running = ctx_time - event->tstamp_running; + } + + if (event->attr.read_format & PERF_FORMAT_GROUP) + perf_output_read_group(handle, event, enabled, running); + else + perf_output_read_one(handle, event, enabled, running); +} + +void perf_output_sample(struct perf_output_handle *handle, + struct perf_event_header *header, + struct perf_sample_data *data, + struct perf_event *event) +{ + u64 sample_type = data->type; + + perf_output_put(handle, *header); + + if (sample_type & PERF_SAMPLE_IP) + perf_output_put(handle, data->ip); + + if (sample_type & PERF_SAMPLE_TID) + perf_output_put(handle, data->tid_entry); + + if (sample_type & PERF_SAMPLE_TIME) + perf_output_put(handle, data->time); + + if (sample_type & PERF_SAMPLE_ADDR) + perf_output_put(handle, data->addr); + + if (sample_type & PERF_SAMPLE_ID) + perf_output_put(handle, data->id); + + if (sample_type & PERF_SAMPLE_STREAM_ID) + perf_output_put(handle, data->stream_id); + + if (sample_type & PERF_SAMPLE_CPU) + perf_output_put(handle, data->cpu_entry); + + if (sample_type & PERF_SAMPLE_PERIOD) + perf_output_put(handle, data->period); + + if (sample_type & PERF_SAMPLE_READ) + perf_output_read(handle, event); + + if (sample_type & PERF_SAMPLE_CALLCHAIN) { + if (data->callchain) { + int size = 1; + + if (data->callchain) + size += data->callchain->nr; + + size *= sizeof(u64); + + perf_output_copy(handle, data->callchain, size); + } else { + u64 nr = 0; + perf_output_put(handle, nr); + } + } + + if (sample_type & PERF_SAMPLE_RAW) { + if (data->raw) { + perf_output_put(handle, data->raw->size); + perf_output_copy(handle, data->raw->data, + data->raw->size); + } else { + struct { + u32 size; + u32 data; + } raw = { + .size = sizeof(u32), + .data = 0, + }; + perf_output_put(handle, raw); + } + } +} + +void perf_prepare_sample(struct perf_event_header *header, + 
struct perf_sample_data *data, + struct perf_event *event, + struct pt_regs *regs) +{ + u64 sample_type = event->attr.sample_type; + + header->type = PERF_RECORD_SAMPLE; + header->size = sizeof(*header) + event->header_size; + + header->misc = 0; + header->misc |= perf_misc_flags(regs); + + __perf_event_header__init_id(header, data, event); + + if (sample_type & PERF_SAMPLE_IP) + data->ip = perf_instruction_pointer(regs); + + if (sample_type & PERF_SAMPLE_CALLCHAIN) { + int size = 1; + + data->callchain = perf_callchain(regs); + + if (data->callchain) + size += data->callchain->nr; + + header->size += size * sizeof(u64); + } + + if (sample_type & PERF_SAMPLE_RAW) { + int size = sizeof(u32); + + if (data->raw) + size += data->raw->size; + else + size += sizeof(u32); + + WARN_ON_ONCE(size & (sizeof(u64)-1)); + header->size += size; + } +} + +static void perf_event_output(struct perf_event *event, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct perf_output_handle handle; + struct perf_event_header header; + + /* protect the callchain buffers */ + rcu_read_lock(); + + perf_prepare_sample(&header, data, event, regs); + + if (perf_output_begin(&handle, event, header.size, nmi, 1)) + goto exit; + + perf_output_sample(&handle, &header, data, event); + + perf_output_end(&handle); + +exit: + rcu_read_unlock(); +} + +/* + * read event_id + */ + +struct perf_read_event { + struct perf_event_header header; + + u32 pid; + u32 tid; +}; + +static void +perf_event_read_event(struct perf_event *event, + struct task_struct *task) +{ + struct perf_output_handle handle; + struct perf_sample_data sample; + struct perf_read_event read_event = { + .header = { + .type = PERF_RECORD_READ, + .misc = 0, + .size = sizeof(read_event) + event->read_size, + }, + .pid = perf_event_pid(event, task), + .tid = perf_event_tid(event, task), + }; + int ret; + + perf_event_header__init_id(&read_event.header, &sample, event); + ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); + if (ret) + return; + + perf_output_put(&handle, read_event); + perf_output_read(&handle, event); + perf_event__output_id_sample(event, &handle, &sample); + + perf_output_end(&handle); +} + +/* + * task tracking -- fork/exit + * + * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task + */ + +struct perf_task_event { + struct task_struct *task; + struct perf_event_context *task_ctx; + + struct { + struct perf_event_header header; + + u32 pid; + u32 ppid; + u32 tid; + u32 ptid; + u64 time; + } event_id; +}; + +static void perf_event_task_output(struct perf_event *event, + struct perf_task_event *task_event) +{ + struct perf_output_handle handle; + struct perf_sample_data sample; + struct task_struct *task = task_event->task; + int ret, size = task_event->event_id.header.size; + + perf_event_header__init_id(&task_event->event_id.header, &sample, event); + + ret = perf_output_begin(&handle, event, + task_event->event_id.header.size, 0, 0); + if (ret) + goto out; + + task_event->event_id.pid = perf_event_pid(event, task); + task_event->event_id.ppid = perf_event_pid(event, current); + + task_event->event_id.tid = perf_event_tid(event, task); + task_event->event_id.ptid = perf_event_tid(event, current); + + perf_output_put(&handle, task_event->event_id); + + perf_event__output_id_sample(event, &handle, &sample); + + perf_output_end(&handle); +out: + task_event->event_id.header.size = size; +} + +static int perf_event_task_match(struct perf_event *event) +{ + if (event->state < 
PERF_EVENT_STATE_INACTIVE) + return 0; + + if (!event_filter_match(event)) + return 0; + + if (event->attr.comm || event->attr.mmap || + event->attr.mmap_data || event->attr.task) + return 1; + + return 0; +} + +static void perf_event_task_ctx(struct perf_event_context *ctx, + struct perf_task_event *task_event) +{ + struct perf_event *event; + + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (perf_event_task_match(event)) + perf_event_task_output(event, task_event); + } +} + +static void perf_event_task_event(struct perf_task_event *task_event) +{ + struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx; + struct pmu *pmu; + int ctxn; + + rcu_read_lock(); + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); + if (cpuctx->active_pmu != pmu) + goto next; + perf_event_task_ctx(&cpuctx->ctx, task_event); + + ctx = task_event->task_ctx; + if (!ctx) { + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + goto next; + ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + } + if (ctx) + perf_event_task_ctx(ctx, task_event); +next: + put_cpu_ptr(pmu->pmu_cpu_context); + } + rcu_read_unlock(); +} + +static void perf_event_task(struct task_struct *task, + struct perf_event_context *task_ctx, + int new) +{ + struct perf_task_event task_event; + + if (!atomic_read(&nr_comm_events) && + !atomic_read(&nr_mmap_events) && + !atomic_read(&nr_task_events)) + return; + + task_event = (struct perf_task_event){ + .task = task, + .task_ctx = task_ctx, + .event_id = { + .header = { + .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, + .misc = 0, + .size = sizeof(task_event.event_id), + }, + /* .pid */ + /* .ppid */ + /* .tid */ + /* .ptid */ + .time = perf_clock(), + }, + }; + + perf_event_task_event(&task_event); +} + +void perf_event_fork(struct task_struct *task) +{ + perf_event_task(task, NULL, 1); +} + +/* + * comm tracking + */ + +struct perf_comm_event { + struct task_struct *task; + char *comm; + int comm_size; + + struct { + struct perf_event_header header; + + u32 pid; + u32 tid; + } event_id; +}; + +static void perf_event_comm_output(struct perf_event *event, + struct perf_comm_event *comm_event) +{ + struct perf_output_handle handle; + struct perf_sample_data sample; + int size = comm_event->event_id.header.size; + int ret; + + perf_event_header__init_id(&comm_event->event_id.header, &sample, event); + ret = perf_output_begin(&handle, event, + comm_event->event_id.header.size, 0, 0); + + if (ret) + goto out; + + comm_event->event_id.pid = perf_event_pid(event, comm_event->task); + comm_event->event_id.tid = perf_event_tid(event, comm_event->task); + + perf_output_put(&handle, comm_event->event_id); + perf_output_copy(&handle, comm_event->comm, + comm_event->comm_size); + + perf_event__output_id_sample(event, &handle, &sample); + + perf_output_end(&handle); +out: + comm_event->event_id.header.size = size; +} + +static int perf_event_comm_match(struct perf_event *event) +{ + if (event->state < PERF_EVENT_STATE_INACTIVE) + return 0; + + if (!event_filter_match(event)) + return 0; + + if (event->attr.comm) + return 1; + + return 0; +} + +static void perf_event_comm_ctx(struct perf_event_context *ctx, + struct perf_comm_event *comm_event) +{ + struct perf_event *event; + + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (perf_event_comm_match(event)) + perf_event_comm_output(event, comm_event); + } +} + +static void perf_event_comm_event(struct perf_comm_event *comm_event) +{ + struct perf_cpu_context *cpuctx; + 
struct perf_event_context *ctx; + char comm[TASK_COMM_LEN]; + unsigned int size; + struct pmu *pmu; + int ctxn; + + memset(comm, 0, sizeof(comm)); + strlcpy(comm, comm_event->task->comm, sizeof(comm)); + size = ALIGN(strlen(comm)+1, sizeof(u64)); + + comm_event->comm = comm; + comm_event->comm_size = size; + + comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; + rcu_read_lock(); + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); + if (cpuctx->active_pmu != pmu) + goto next; + perf_event_comm_ctx(&cpuctx->ctx, comm_event); + + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + goto next; + + ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + if (ctx) + perf_event_comm_ctx(ctx, comm_event); +next: + put_cpu_ptr(pmu->pmu_cpu_context); + } + rcu_read_unlock(); +} + +void perf_event_comm(struct task_struct *task) +{ + struct perf_comm_event comm_event; + struct perf_event_context *ctx; + int ctxn; + + for_each_task_context_nr(ctxn) { + ctx = task->perf_event_ctxp[ctxn]; + if (!ctx) + continue; + + perf_event_enable_on_exec(ctx); + } + + if (!atomic_read(&nr_comm_events)) + return; + + comm_event = (struct perf_comm_event){ + .task = task, + /* .comm */ + /* .comm_size */ + .event_id = { + .header = { + .type = PERF_RECORD_COMM, + .misc = 0, + /* .size */ + }, + /* .pid */ + /* .tid */ + }, + }; + + perf_event_comm_event(&comm_event); +} + +/* + * mmap tracking + */ + +struct perf_mmap_event { + struct vm_area_struct *vma; + + const char *file_name; + int file_size; + + struct { + struct perf_event_header header; + + u32 pid; + u32 tid; + u64 start; + u64 len; + u64 pgoff; + } event_id; +}; + +static void perf_event_mmap_output(struct perf_event *event, + struct perf_mmap_event *mmap_event) +{ + struct perf_output_handle handle; + struct perf_sample_data sample; + int size = mmap_event->event_id.header.size; + int ret; + + perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); + ret = perf_output_begin(&handle, event, + mmap_event->event_id.header.size, 0, 0); + if (ret) + goto out; + + mmap_event->event_id.pid = perf_event_pid(event, current); + mmap_event->event_id.tid = perf_event_tid(event, current); + + perf_output_put(&handle, mmap_event->event_id); + perf_output_copy(&handle, mmap_event->file_name, + mmap_event->file_size); + + perf_event__output_id_sample(event, &handle, &sample); + + perf_output_end(&handle); +out: + mmap_event->event_id.header.size = size; +} + +static int perf_event_mmap_match(struct perf_event *event, + struct perf_mmap_event *mmap_event, + int executable) +{ + if (event->state < PERF_EVENT_STATE_INACTIVE) + return 0; + + if (!event_filter_match(event)) + return 0; + + if ((!executable && event->attr.mmap_data) || + (executable && event->attr.mmap)) + return 1; + + return 0; +} + +static void perf_event_mmap_ctx(struct perf_event_context *ctx, + struct perf_mmap_event *mmap_event, + int executable) +{ + struct perf_event *event; + + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (perf_event_mmap_match(event, mmap_event, executable)) + perf_event_mmap_output(event, mmap_event); + } +} + +static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) +{ + struct perf_cpu_context *cpuctx; + struct perf_event_context *ctx; + struct vm_area_struct *vma = mmap_event->vma; + struct file *file = vma->vm_file; + unsigned int size; + char tmp[16]; + char *buf = NULL; + const char *name; + struct pmu *pmu; + int ctxn; + + memset(tmp, 0, sizeof(tmp)); + + if (file) { + /* 
+ * d_path works from the end of the buffer backwards, so we + * need to add enough zero bytes after the string to handle + * the 64bit alignment we do later. + */ + buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); + if (!buf) { + name = strncpy(tmp, "//enomem", sizeof(tmp)); + goto got_name; + } + name = d_path(&file->f_path, buf, PATH_MAX); + if (IS_ERR(name)) { + name = strncpy(tmp, "//toolong", sizeof(tmp)); + goto got_name; + } + } else { + if (arch_vma_name(mmap_event->vma)) { + name = strncpy(tmp, arch_vma_name(mmap_event->vma), + sizeof(tmp)); + goto got_name; + } + + if (!vma->vm_mm) { + name = strncpy(tmp, "[vdso]", sizeof(tmp)); + goto got_name; + } else if (vma->vm_start <= vma->vm_mm->start_brk && + vma->vm_end >= vma->vm_mm->brk) { + name = strncpy(tmp, "[heap]", sizeof(tmp)); + goto got_name; + } else if (vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack) { + name = strncpy(tmp, "[stack]", sizeof(tmp)); + goto got_name; + } + + name = strncpy(tmp, "//anon", sizeof(tmp)); + goto got_name; + } + +got_name: + size = ALIGN(strlen(name)+1, sizeof(u64)); + + mmap_event->file_name = name; + mmap_event->file_size = size; + + mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; + + rcu_read_lock(); + list_for_each_entry_rcu(pmu, &pmus, entry) { + cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); + if (cpuctx->active_pmu != pmu) + goto next; + perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, + vma->vm_flags & VM_EXEC); + + ctxn = pmu->task_ctx_nr; + if (ctxn < 0) + goto next; + + ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); + if (ctx) { + perf_event_mmap_ctx(ctx, mmap_event, + vma->vm_flags & VM_EXEC); + } +next: + put_cpu_ptr(pmu->pmu_cpu_context); + } + rcu_read_unlock(); + + kfree(buf); +} + +void perf_event_mmap(struct vm_area_struct *vma) +{ + struct perf_mmap_event mmap_event; + + if (!atomic_read(&nr_mmap_events)) + return; + + mmap_event = (struct perf_mmap_event){ + .vma = vma, + /* .file_name */ + /* .file_size */ + .event_id = { + .header = { + .type = PERF_RECORD_MMAP, + .misc = PERF_RECORD_MISC_USER, + /* .size */ + }, + /* .pid */ + /* .tid */ + .start = vma->vm_start, + .len = vma->vm_end - vma->vm_start, + .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, + }, + }; + + perf_event_mmap_event(&mmap_event); +} + +/* + * IRQ throttle logging + */ + +static void perf_log_throttle(struct perf_event *event, int enable) +{ + struct perf_output_handle handle; + struct perf_sample_data sample; + int ret; + + struct { + struct perf_event_header header; + u64 time; + u64 id; + u64 stream_id; + } throttle_event = { + .header = { + .type = PERF_RECORD_THROTTLE, + .misc = 0, + .size = sizeof(throttle_event), + }, + .time = perf_clock(), + .id = primary_event_id(event), + .stream_id = event->id, + }; + + if (enable) + throttle_event.header.type = PERF_RECORD_UNTHROTTLE; + + perf_event_header__init_id(&throttle_event.header, &sample, event); + + ret = perf_output_begin(&handle, event, + throttle_event.header.size, 1, 0); + if (ret) + return; + + perf_output_put(&handle, throttle_event); + perf_event__output_id_sample(event, &handle, &sample); + perf_output_end(&handle); +} + +/* + * Generic event overflow handling, sampling. 
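 *
 * Two common ways user space arms the sampling machinery below
 * (illustrative attr settings, one or the other):
 *
 *	attr.freq          = 1;
 *	attr.sample_freq   = 4000;	target samples/sec; the period is
 *					re-estimated via perf_adjust_period()
 *
 *	attr.freq          = 0;
 *	attr.sample_period = 100000;	fixed period in raw event counts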
+ */ + +static int __perf_event_overflow(struct perf_event *event, int nmi, + int throttle, struct perf_sample_data *data, + struct pt_regs *regs) +{ + int events = atomic_read(&event->event_limit); + struct hw_perf_event *hwc = &event->hw; + int ret = 0; + + /* + * Non-sampling counters might still use the PMI to fold short + * hardware counters, ignore those. + */ + if (unlikely(!is_sampling_event(event))) + return 0; + + if (unlikely(hwc->interrupts >= max_samples_per_tick)) { + if (throttle) { + hwc->interrupts = MAX_INTERRUPTS; + perf_log_throttle(event, 0); + ret = 1; + } + } else + hwc->interrupts++; + + if (event->attr.freq) { + u64 now = perf_clock(); + s64 delta = now - hwc->freq_time_stamp; + + hwc->freq_time_stamp = now; + + if (delta > 0 && delta < 2*TICK_NSEC) + perf_adjust_period(event, delta, hwc->last_period); + } + + /* + * XXX event_limit might not quite work as expected on inherited + * events + */ + + event->pending_kill = POLL_IN; + if (events && atomic_dec_and_test(&event->event_limit)) { + ret = 1; + event->pending_kill = POLL_HUP; + if (nmi) { + event->pending_disable = 1; + irq_work_queue(&event->pending); + } else + perf_event_disable(event); + } + + if (event->overflow_handler) + event->overflow_handler(event, nmi, data, regs); + else + perf_event_output(event, nmi, data, regs); + + return ret; +} + +int perf_event_overflow(struct perf_event *event, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + return __perf_event_overflow(event, nmi, 1, data, regs); +} + +/* + * Generic software event infrastructure + */ + +struct swevent_htable { + struct swevent_hlist *swevent_hlist; + struct mutex hlist_mutex; + int hlist_refcount; + + /* Recursion avoidance in each contexts */ + int recursion[PERF_NR_CONTEXTS]; +}; + +static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); + +/* + * We directly increment event->count and keep a second value in + * event->hw.period_left to count intervals. This period event + * is kept in the range [-sample_period, 0] so that we can use the + * sign as trigger. + */ + +static u64 perf_swevent_set_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 period = hwc->last_period; + u64 nr, offset; + s64 old, val; + + hwc->last_period = hwc->sample_period; + +again: + old = val = local64_read(&hwc->period_left); + if (val < 0) + return 0; + + nr = div64_u64(period + val, period); + offset = nr * period; + val -= offset; + if (local64_cmpxchg(&hwc->period_left, old, val) != old) + goto again; + + return nr; +} + +static void perf_swevent_overflow(struct perf_event *event, u64 overflow, + int nmi, struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct hw_perf_event *hwc = &event->hw; + int throttle = 0; + + data->period = event->hw.last_period; + if (!overflow) + overflow = perf_swevent_set_period(event); + + if (hwc->interrupts == MAX_INTERRUPTS) + return; + + for (; overflow; overflow--) { + if (__perf_event_overflow(event, nmi, throttle, + data, regs)) { + /* + * We inhibit the overflow from happening when + * hwc->interrupts == MAX_INTERRUPTS. 
+ */ + break; + } + throttle = 1; + } +} + +static void perf_swevent_event(struct perf_event *event, u64 nr, + int nmi, struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct hw_perf_event *hwc = &event->hw; + + local64_add(nr, &event->count); + + if (!regs) + return; + + if (!is_sampling_event(event)) + return; + + if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) + return perf_swevent_overflow(event, 1, nmi, data, regs); + + if (local64_add_negative(nr, &hwc->period_left)) + return; + + perf_swevent_overflow(event, 0, nmi, data, regs); +} + +static int perf_exclude_event(struct perf_event *event, + struct pt_regs *regs) +{ + if (event->hw.state & PERF_HES_STOPPED) + return 1; + + if (regs) { + if (event->attr.exclude_user && user_mode(regs)) + return 1; + + if (event->attr.exclude_kernel && !user_mode(regs)) + return 1; + } + + return 0; +} + +static int perf_swevent_match(struct perf_event *event, + enum perf_type_id type, + u32 event_id, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + if (event->attr.type != type) + return 0; + + if (event->attr.config != event_id) + return 0; + + if (perf_exclude_event(event, regs)) + return 0; + + return 1; +} + +static inline u64 swevent_hash(u64 type, u32 event_id) +{ + u64 val = event_id | (type << 32); + + return hash_64(val, SWEVENT_HLIST_BITS); +} + +static inline struct hlist_head * +__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) +{ + u64 hash = swevent_hash(type, event_id); + + return &hlist->heads[hash]; +} + +/* For the read side: events when they trigger */ +static inline struct hlist_head * +find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) +{ + struct swevent_hlist *hlist; + + hlist = rcu_dereference(swhash->swevent_hlist); + if (!hlist) + return NULL; + + return __find_swevent_head(hlist, type, event_id); +} + +/* For the event head insertion and removal in the hlist */ +static inline struct hlist_head * +find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) +{ + struct swevent_hlist *hlist; + u32 event_id = event->attr.config; + u64 type = event->attr.type; + + /* + * Event scheduling is always serialized against hlist allocation + * and release. Which makes the protected version suitable here. + * The context lock guarantees that. 
+ */ + hlist = rcu_dereference_protected(swhash->swevent_hlist, + lockdep_is_held(&event->ctx->lock)); + if (!hlist) + return NULL; + + return __find_swevent_head(hlist, type, event_id); +} + +static void do_perf_sw_event(enum perf_type_id type, u32 event_id, + u64 nr, int nmi, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); + struct perf_event *event; + struct hlist_node *node; + struct hlist_head *head; + + rcu_read_lock(); + head = find_swevent_head_rcu(swhash, type, event_id); + if (!head) + goto end; + + hlist_for_each_entry_rcu(event, node, head, hlist_entry) { + if (perf_swevent_match(event, type, event_id, data, regs)) + perf_swevent_event(event, nr, nmi, data, regs); + } +end: + rcu_read_unlock(); +} + +int perf_swevent_get_recursion_context(void) +{ + struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); + + return get_recursion_context(swhash->recursion); +} +EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); + +inline void perf_swevent_put_recursion_context(int rctx) +{ + struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); + + put_recursion_context(swhash->recursion, rctx); +} + +void __perf_sw_event(u32 event_id, u64 nr, int nmi, + struct pt_regs *regs, u64 addr) +{ + struct perf_sample_data data; + int rctx; + + preempt_disable_notrace(); + rctx = perf_swevent_get_recursion_context(); + if (rctx < 0) + return; + + perf_sample_data_init(&data, addr); + + do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); + + perf_swevent_put_recursion_context(rctx); + preempt_enable_notrace(); +} + +static void perf_swevent_read(struct perf_event *event) +{ +} + +static int perf_swevent_add(struct perf_event *event, int flags) +{ + struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); + struct hw_perf_event *hwc = &event->hw; + struct hlist_head *head; + + if (is_sampling_event(event)) { + hwc->last_period = hwc->sample_period; + perf_swevent_set_period(event); + } + + hwc->state = !(flags & PERF_EF_START); + + head = find_swevent_head(swhash, event); + if (WARN_ON_ONCE(!head)) + return -EINVAL; + + hlist_add_head_rcu(&event->hlist_entry, head); + + return 0; +} + +static void perf_swevent_del(struct perf_event *event, int flags) +{ + hlist_del_rcu(&event->hlist_entry); +} + +static void perf_swevent_start(struct perf_event *event, int flags) +{ + event->hw.state = 0; +} + +static void perf_swevent_stop(struct perf_event *event, int flags) +{ + event->hw.state = PERF_HES_STOPPED; +} + +/* Deref the hlist from the update side */ +static inline struct swevent_hlist * +swevent_hlist_deref(struct swevent_htable *swhash) +{ + return rcu_dereference_protected(swhash->swevent_hlist, + lockdep_is_held(&swhash->hlist_mutex)); +} + +static void swevent_hlist_release_rcu(struct rcu_head *rcu_head) +{ + struct swevent_hlist *hlist; + + hlist = container_of(rcu_head, struct swevent_hlist, rcu_head); + kfree(hlist); +} + +static void swevent_hlist_release(struct swevent_htable *swhash) +{ + struct swevent_hlist *hlist = swevent_hlist_deref(swhash); + + if (!hlist) + return; + + rcu_assign_pointer(swhash->swevent_hlist, NULL); + call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu); +} + +static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) +{ + struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); + + mutex_lock(&swhash->hlist_mutex); + + if (!--swhash->hlist_refcount) + swevent_hlist_release(swhash); + + mutex_unlock(&swhash->hlist_mutex); +} + 
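The software-event PMU set up below is reached from user space through perf_event_open(2) with a PERF_TYPE_SOFTWARE attribute, and the ioctl()s dispatched at the top of this patch drive enable/disable/reset. A minimal user-space sketch of such a caller, counting page faults of the current task (the perf_event_open() wrapper and the chosen counter are illustrative, not prescribed by this code):

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>

	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		long long count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size     = sizeof(attr);
		attr.type     = PERF_TYPE_SOFTWARE;
		attr.config   = PERF_COUNT_SW_PAGE_FAULTS;
		attr.disabled = 1;

		/* pid 0 == current task, cpu -1 == any cpu, no group, no flags */
		fd = perf_event_open(&attr, 0, -1, -1, 0);
		if (fd < 0)
			return 1;

		ioctl(fd, PERF_EVENT_IOC_RESET, 0);
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... workload under measurement ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		read(fd, &count, sizeof(count));
		printf("page faults: %lld\n", count);
		close(fd);
		return 0;
	}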
+static void swevent_hlist_put(struct perf_event *event) +{ + int cpu; + + if (event->cpu != -1) { + swevent_hlist_put_cpu(event, event->cpu); + return; + } + + for_each_possible_cpu(cpu) + swevent_hlist_put_cpu(event, cpu); +} + +static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) +{ + struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); + int err = 0; + + mutex_lock(&swhash->hlist_mutex); + + if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { + struct swevent_hlist *hlist; + + hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); + if (!hlist) { + err = -ENOMEM; + goto exit; + } + rcu_assign_pointer(swhash->swevent_hlist, hlist); + } + swhash->hlist_refcount++; +exit: + mutex_unlock(&swhash->hlist_mutex); + + return err; +} + +static int swevent_hlist_get(struct perf_event *event) +{ + int err; + int cpu, failed_cpu; + + if (event->cpu != -1) + return swevent_hlist_get_cpu(event, event->cpu); + + get_online_cpus(); + for_each_possible_cpu(cpu) { + err = swevent_hlist_get_cpu(event, cpu); + if (err) { + failed_cpu = cpu; + goto fail; + } + } + put_online_cpus(); + + return 0; +fail: + for_each_possible_cpu(cpu) { + if (cpu == failed_cpu) + break; + swevent_hlist_put_cpu(event, cpu); + } + + put_online_cpus(); + return err; +} + +struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; + +static void sw_perf_event_destroy(struct perf_event *event) +{ + u64 event_id = event->attr.config; + + WARN_ON(event->parent); + + jump_label_dec(&perf_swevent_enabled[event_id]); + swevent_hlist_put(event); +} + +static int perf_swevent_init(struct perf_event *event) +{ + int event_id = event->attr.config; + + if (event->attr.type != PERF_TYPE_SOFTWARE) + return -ENOENT; + + switch (event_id) { + case PERF_COUNT_SW_CPU_CLOCK: + case PERF_COUNT_SW_TASK_CLOCK: + return -ENOENT; + + default: + break; + } + + if (event_id >= PERF_COUNT_SW_MAX) + return -ENOENT; + + if (!event->parent) { + int err; + + err = swevent_hlist_get(event); + if (err) + return err; + + jump_label_inc(&perf_swevent_enabled[event_id]); + event->destroy = sw_perf_event_destroy; + } + + return 0; +} + +static struct pmu perf_swevent = { + .task_ctx_nr = perf_sw_context, + + .event_init = perf_swevent_init, + .add = perf_swevent_add, + .del = perf_swevent_del, + .start = perf_swevent_start, + .stop = perf_swevent_stop, + .read = perf_swevent_read, +}; + +#ifdef CONFIG_EVENT_TRACING + +static int perf_tp_filter_match(struct perf_event *event, + struct perf_sample_data *data) +{ + void *record = data->raw->data; + + if (likely(!event->filter) || filter_match_preds(event->filter, record)) + return 1; + return 0; +} + +static int perf_tp_event_match(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + if (event->hw.state & PERF_HES_STOPPED) + return 0; + /* + * All tracepoints are from kernel-space. 
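 *
 * (A tracepoint counter is requested with attr.type = PERF_TYPE_TRACEPOINT
 * and attr.config set to the tracepoint id, e.g. the number read from
 * debugfs under tracing/events/<subsys>/<name>/id; since every such event
 * fires in kernel context, only exclude_kernel is checked just below.)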
+ */ + if (event->attr.exclude_kernel) + return 0; + + if (!perf_tp_filter_match(event, data)) + return 0; + + return 1; +} + +void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, + struct pt_regs *regs, struct hlist_head *head, int rctx) +{ + struct perf_sample_data data; + struct perf_event *event; + struct hlist_node *node; + + struct perf_raw_record raw = { + .size = entry_size, + .data = record, + }; + + perf_sample_data_init(&data, addr); + data.raw = &raw; + + hlist_for_each_entry_rcu(event, node, head, hlist_entry) { + if (perf_tp_event_match(event, &data, regs)) + perf_swevent_event(event, count, 1, &data, regs); + } + + perf_swevent_put_recursion_context(rctx); +} +EXPORT_SYMBOL_GPL(perf_tp_event); + +static void tp_perf_event_destroy(struct perf_event *event) +{ + perf_trace_destroy(event); +} + +static int perf_tp_event_init(struct perf_event *event) +{ + int err; + + if (event->attr.type != PERF_TYPE_TRACEPOINT) + return -ENOENT; + + err = perf_trace_init(event); + if (err) + return err; + + event->destroy = tp_perf_event_destroy; + + return 0; +} + +static struct pmu perf_tracepoint = { + .task_ctx_nr = perf_sw_context, + + .event_init = perf_tp_event_init, + .add = perf_trace_add, + .del = perf_trace_del, + .start = perf_swevent_start, + .stop = perf_swevent_stop, + .read = perf_swevent_read, +}; + +static inline void perf_tp_register(void) +{ + perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); +} + +static int perf_event_set_filter(struct perf_event *event, void __user *arg) +{ + char *filter_str; + int ret; + + if (event->attr.type != PERF_TYPE_TRACEPOINT) + return -EINVAL; + + filter_str = strndup_user(arg, PAGE_SIZE); + if (IS_ERR(filter_str)) + return PTR_ERR(filter_str); + + ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); + + kfree(filter_str); + return ret; +} + +static void perf_event_free_filter(struct perf_event *event) +{ + ftrace_profile_free_filter(event); +} + +#else + +static inline void perf_tp_register(void) +{ +} + +static int perf_event_set_filter(struct perf_event *event, void __user *arg) +{ + return -ENOENT; +} + +static void perf_event_free_filter(struct perf_event *event) +{ +} + +#endif /* CONFIG_EVENT_TRACING */ + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +void perf_bp_event(struct perf_event *bp, void *data) +{ + struct perf_sample_data sample; + struct pt_regs *regs = data; + + perf_sample_data_init(&sample, bp->attr.bp_addr); + + if (!bp->hw.state && !perf_exclude_event(bp, regs)) + perf_swevent_event(bp, 1, 1, &sample, regs); +} +#endif + +/* + * hrtimer based swevent callback + */ + +static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) +{ + enum hrtimer_restart ret = HRTIMER_RESTART; + struct perf_sample_data data; + struct pt_regs *regs; + struct perf_event *event; + u64 period; + + event = container_of(hrtimer, struct perf_event, hw.hrtimer); + + if (event->state != PERF_EVENT_STATE_ACTIVE) + return HRTIMER_NORESTART; + + event->pmu->read(event); + + perf_sample_data_init(&data, 0); + data.period = event->hw.last_period; + regs = get_irq_regs(); + + if (regs && !perf_exclude_event(event, regs)) { + if (!(event->attr.exclude_idle && current->pid == 0)) + if (perf_event_overflow(event, 0, &data, regs)) + ret = HRTIMER_NORESTART; + } + + period = max_t(u64, 10000, event->hw.sample_period); + hrtimer_forward_now(hrtimer, ns_to_ktime(period)); + + return ret; +} + +static void perf_swevent_start_hrtimer(struct perf_event *event) +{ + struct hw_perf_event *hwc = 
&event->hw; + s64 period; + + if (!is_sampling_event(event)) + return; + + period = local64_read(&hwc->period_left); + if (period) { + if (period < 0) + period = 10000; + + local64_set(&hwc->period_left, 0); + } else { + period = max_t(u64, 10000, hwc->sample_period); + } + __hrtimer_start_range_ns(&hwc->hrtimer, + ns_to_ktime(period), 0, + HRTIMER_MODE_REL_PINNED, 0); +} + +static void perf_swevent_cancel_hrtimer(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (is_sampling_event(event)) { + ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); + local64_set(&hwc->period_left, ktime_to_ns(remaining)); + + hrtimer_cancel(&hwc->hrtimer); + } +} + +static void perf_swevent_init_hrtimer(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!is_sampling_event(event)) + return; + + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hwc->hrtimer.function = perf_swevent_hrtimer; + + /* + * Since hrtimers have a fixed rate, we can do a static freq->period + * mapping and avoid the whole period adjust feedback stuff. + */ + if (event->attr.freq) { + long freq = event->attr.sample_freq; + + event->attr.sample_period = NSEC_PER_SEC / freq; + hwc->sample_period = event->attr.sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + event->attr.freq = 0; + } +} + +/* + * Software event: cpu wall time clock + */ + +static void cpu_clock_event_update(struct perf_event *event) +{ + s64 prev; + u64 now; + + now = local_clock(); + prev = local64_xchg(&event->hw.prev_count, now); + local64_add(now - prev, &event->count); +} + +static void cpu_clock_event_start(struct perf_event *event, int flags) +{ + local64_set(&event->hw.prev_count, local_clock()); + perf_swevent_start_hrtimer(event); +} + +static void cpu_clock_event_stop(struct perf_event *event, int flags) +{ + perf_swevent_cancel_hrtimer(event); + cpu_clock_event_update(event); +} + +static int cpu_clock_event_add(struct perf_event *event, int flags) +{ + if (flags & PERF_EF_START) + cpu_clock_event_start(event, flags); + + return 0; +} + +static void cpu_clock_event_del(struct perf_event *event, int flags) +{ + cpu_clock_event_stop(event, flags); +} + +static void cpu_clock_event_read(struct perf_event *event) +{ + cpu_clock_event_update(event); +} + +static int cpu_clock_event_init(struct perf_event *event) +{ + if (event->attr.type != PERF_TYPE_SOFTWARE) + return -ENOENT; + + if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) + return -ENOENT; + + perf_swevent_init_hrtimer(event); + + return 0; +} + +static struct pmu perf_cpu_clock = { + .task_ctx_nr = perf_sw_context, + + .event_init = cpu_clock_event_init, + .add = cpu_clock_event_add, + .del = cpu_clock_event_del, + .start = cpu_clock_event_start, + .stop = cpu_clock_event_stop, + .read = cpu_clock_event_read, +}; + +/* + * Software event: task time clock + */ + +static void task_clock_event_update(struct perf_event *event, u64 now) +{ + u64 prev; + s64 delta; + + prev = local64_xchg(&event->hw.prev_count, now); + delta = now - prev; + local64_add(delta, &event->count); +} + +static void task_clock_event_start(struct perf_event *event, int flags) +{ + local64_set(&event->hw.prev_count, event->ctx->time); + perf_swevent_start_hrtimer(event); +} + +static void task_clock_event_stop(struct perf_event *event, int flags) +{ + perf_swevent_cancel_hrtimer(event); + task_clock_event_update(event, event->ctx->time); +} + +static int task_clock_event_add(struct perf_event *event, int flags) +{ + if (flags & 
PERF_EF_START) + task_clock_event_start(event, flags); + + return 0; +} + +static void task_clock_event_del(struct perf_event *event, int flags) +{ + task_clock_event_stop(event, PERF_EF_UPDATE); +} + +static void task_clock_event_read(struct perf_event *event) +{ + u64 now = perf_clock(); + u64 delta = now - event->ctx->timestamp; + u64 time = event->ctx->time + delta; + + task_clock_event_update(event, time); +} + +static int task_clock_event_init(struct perf_event *event) +{ + if (event->attr.type != PERF_TYPE_SOFTWARE) + return -ENOENT; + + if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) + return -ENOENT; + + perf_swevent_init_hrtimer(event); + + return 0; +} + +static struct pmu perf_task_clock = { + .task_ctx_nr = perf_sw_context, + + .event_init = task_clock_event_init, + .add = task_clock_event_add, + .del = task_clock_event_del, + .start = task_clock_event_start, + .stop = task_clock_event_stop, + .read = task_clock_event_read, +}; + +static void perf_pmu_nop_void(struct pmu *pmu) +{ +} + +static int perf_pmu_nop_int(struct pmu *pmu) +{ + return 0; +} + +static void perf_pmu_start_txn(struct pmu *pmu) +{ + perf_pmu_disable(pmu); +} + +static int perf_pmu_commit_txn(struct pmu *pmu) +{ + perf_pmu_enable(pmu); + return 0; +} + +static void perf_pmu_cancel_txn(struct pmu *pmu) +{ + perf_pmu_enable(pmu); +} + +/* + * Ensures all contexts with the same task_ctx_nr have the same + * pmu_cpu_context too. + */ +static void *find_pmu_context(int ctxn) +{ + struct pmu *pmu; + + if (ctxn < 0) + return NULL; + + list_for_each_entry(pmu, &pmus, entry) { + if (pmu->task_ctx_nr == ctxn) + return pmu->pmu_cpu_context; + } + + return NULL; +} + +static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct perf_cpu_context *cpuctx; + + cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); + + if (cpuctx->active_pmu == old_pmu) + cpuctx->active_pmu = pmu; + } +} + +static void free_pmu_context(struct pmu *pmu) +{ + struct pmu *i; + + mutex_lock(&pmus_lock); + /* + * Like a real lame refcount. 
+ */ + list_for_each_entry(i, &pmus, entry) { + if (i->pmu_cpu_context == pmu->pmu_cpu_context) { + update_pmu_context(i, pmu); + goto out; + } + } + + free_percpu(pmu->pmu_cpu_context); +out: + mutex_unlock(&pmus_lock); +} +static struct idr pmu_idr; + +static ssize_t +type_show(struct device *dev, struct device_attribute *attr, char *page) +{ + struct pmu *pmu = dev_get_drvdata(dev); + + return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); +} + +static struct device_attribute pmu_dev_attrs[] = { + __ATTR_RO(type), + __ATTR_NULL, +}; + +static int pmu_bus_running; +static struct bus_type pmu_bus = { + .name = "event_source", + .dev_attrs = pmu_dev_attrs, +}; + +static void pmu_dev_release(struct device *dev) +{ + kfree(dev); +} + +static int pmu_dev_alloc(struct pmu *pmu) +{ + int ret = -ENOMEM; + + pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!pmu->dev) + goto out; + + device_initialize(pmu->dev); + ret = dev_set_name(pmu->dev, "%s", pmu->name); + if (ret) + goto free_dev; + + dev_set_drvdata(pmu->dev, pmu); + pmu->dev->bus = &pmu_bus; + pmu->dev->release = pmu_dev_release; + ret = device_add(pmu->dev); + if (ret) + goto free_dev; + +out: + return ret; + +free_dev: + put_device(pmu->dev); + goto out; +} + +static struct lock_class_key cpuctx_mutex; + +int perf_pmu_register(struct pmu *pmu, char *name, int type) +{ + int cpu, ret; + + mutex_lock(&pmus_lock); + ret = -ENOMEM; + pmu->pmu_disable_count = alloc_percpu(int); + if (!pmu->pmu_disable_count) + goto unlock; + + pmu->type = -1; + if (!name) + goto skip_type; + pmu->name = name; + + if (type < 0) { + int err = idr_pre_get(&pmu_idr, GFP_KERNEL); + if (!err) + goto free_pdc; + + err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type); + if (err) { + ret = err; + goto free_pdc; + } + } + pmu->type = type; + + if (pmu_bus_running) { + ret = pmu_dev_alloc(pmu); + if (ret) + goto free_idr; + } + +skip_type: + pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); + if (pmu->pmu_cpu_context) + goto got_cpu_context; + + pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); + if (!pmu->pmu_cpu_context) + goto free_dev; + + for_each_possible_cpu(cpu) { + struct perf_cpu_context *cpuctx; + + cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); + __perf_event_init_context(&cpuctx->ctx); + lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); + cpuctx->ctx.type = cpu_context; + cpuctx->ctx.pmu = pmu; + cpuctx->jiffies_interval = 1; + INIT_LIST_HEAD(&cpuctx->rotation_list); + cpuctx->active_pmu = pmu; + } + +got_cpu_context: + if (!pmu->start_txn) { + if (pmu->pmu_enable) { + /* + * If we have pmu_enable/pmu_disable calls, install + * transaction stubs that use that to try and batch + * hardware accesses. 
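 *
 * The group scheduling path then conceptually does (sketch, not the
 * literal caller):
 *
 *	pmu->start_txn(pmu);
 *	list_for_each_entry(event, &group_leader->sibling_list, group_entry) {
 *		if (event->pmu->add(event, PERF_EF_START))
 *			goto rollback;
 *	}
 *	if (!pmu->commit_txn(pmu))
 *		return 0;
 * rollback:
 *	pmu->cancel_txn(pmu);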
+ */ + pmu->start_txn = perf_pmu_start_txn; + pmu->commit_txn = perf_pmu_commit_txn; + pmu->cancel_txn = perf_pmu_cancel_txn; + } else { + pmu->start_txn = perf_pmu_nop_void; + pmu->commit_txn = perf_pmu_nop_int; + pmu->cancel_txn = perf_pmu_nop_void; + } + } + + if (!pmu->pmu_enable) { + pmu->pmu_enable = perf_pmu_nop_void; + pmu->pmu_disable = perf_pmu_nop_void; + } + + list_add_rcu(&pmu->entry, &pmus); + ret = 0; +unlock: + mutex_unlock(&pmus_lock); + + return ret; + +free_dev: + device_del(pmu->dev); + put_device(pmu->dev); + +free_idr: + if (pmu->type >= PERF_TYPE_MAX) + idr_remove(&pmu_idr, pmu->type); + +free_pdc: + free_percpu(pmu->pmu_disable_count); + goto unlock; +} + +void perf_pmu_unregister(struct pmu *pmu) +{ + mutex_lock(&pmus_lock); + list_del_rcu(&pmu->entry); + mutex_unlock(&pmus_lock); + + /* + * We dereference the pmu list under both SRCU and regular RCU, so + * synchronize against both of those. + */ + synchronize_srcu(&pmus_srcu); + synchronize_rcu(); + + free_percpu(pmu->pmu_disable_count); + if (pmu->type >= PERF_TYPE_MAX) + idr_remove(&pmu_idr, pmu->type); + device_del(pmu->dev); + put_device(pmu->dev); + free_pmu_context(pmu); +} + +struct pmu *perf_init_event(struct perf_event *event) +{ + struct pmu *pmu = NULL; + int idx; + int ret; + + idx = srcu_read_lock(&pmus_srcu); + + rcu_read_lock(); + pmu = idr_find(&pmu_idr, event->attr.type); + rcu_read_unlock(); + if (pmu) { + ret = pmu->event_init(event); + if (ret) + pmu = ERR_PTR(ret); + goto unlock; + } + + list_for_each_entry_rcu(pmu, &pmus, entry) { + ret = pmu->event_init(event); + if (!ret) + goto unlock; + + if (ret != -ENOENT) { + pmu = ERR_PTR(ret); + goto unlock; + } + } + pmu = ERR_PTR(-ENOENT); +unlock: + srcu_read_unlock(&pmus_srcu, idx); + + return pmu; +} + +/* + * Allocate and initialize a event structure + */ +static struct perf_event * +perf_event_alloc(struct perf_event_attr *attr, int cpu, + struct task_struct *task, + struct perf_event *group_leader, + struct perf_event *parent_event, + perf_overflow_handler_t overflow_handler) +{ + struct pmu *pmu; + struct perf_event *event; + struct hw_perf_event *hwc; + long err; + + if ((unsigned)cpu >= nr_cpu_ids) { + if (!task || cpu != -1) + return ERR_PTR(-EINVAL); + } + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + return ERR_PTR(-ENOMEM); + + /* + * Single events are their own group leaders, with an + * empty sibling list: + */ + if (!group_leader) + group_leader = event; + + mutex_init(&event->child_mutex); + INIT_LIST_HEAD(&event->child_list); + + INIT_LIST_HEAD(&event->group_entry); + INIT_LIST_HEAD(&event->event_entry); + INIT_LIST_HEAD(&event->sibling_list); + init_waitqueue_head(&event->waitq); + init_irq_work(&event->pending, perf_pending_event); + + mutex_init(&event->mmap_mutex); + + event->cpu = cpu; + event->attr = *attr; + event->group_leader = group_leader; + event->pmu = NULL; + event->oncpu = -1; + + event->parent = parent_event; + + event->ns = get_pid_ns(current->nsproxy->pid_ns); + event->id = atomic64_inc_return(&perf_event_id); + + event->state = PERF_EVENT_STATE_INACTIVE; + + if (task) { + event->attach_state = PERF_ATTACH_TASK; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + /* + * hw_breakpoint is a bit difficult here.. 
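[Editor's sketch, for illustration only: a minimal example of how a hypothetical built-in counter driver would sit on the perf_pmu_register() interface above. The widget_* names are invented, the stubs do no real counting, and the callback signatures simply mirror the task-clock pmu earlier in this file.]

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/perf_event.h>

static struct pmu widget_pmu;	/* hypothetical pmu, defined below */

static void widget_event_start(struct perf_event *event, int flags)
{
	/* start the hardware counter behind @event (stub) */
}

static void widget_event_stop(struct perf_event *event, int flags)
{
	/* stop the counter; PERF_EF_UPDATE asks for a final count (stub) */
}

static int widget_event_add(struct perf_event *event, int flags)
{
	/* claim a counter slot; mirrors task_clock_event_add() above */
	if (flags & PERF_EF_START)
		widget_event_start(event, flags);
	return 0;
}

static void widget_event_del(struct perf_event *event, int flags)
{
	widget_event_stop(event, PERF_EF_UPDATE);
}

static void widget_event_read(struct perf_event *event)
{
	/* fold the current hardware value into the event count (stub) */
}

static int widget_event_init(struct perf_event *event)
{
	/* only claim events that carry our dynamically assigned type */
	if (event->attr.type != widget_pmu.type)
		return -ENOENT;
	return 0;
}

static struct pmu widget_pmu = {
	.task_ctx_nr	= perf_invalid_context,	/* per-cpu counters only */
	.event_init	= widget_event_init,
	.add		= widget_event_add,
	.del		= widget_event_del,
	.start		= widget_event_start,
	.stop		= widget_event_stop,
	.read		= widget_event_read,
};

static int __init widget_pmu_init(void)
{
	/* type < 0 requests a dynamic id from pmu_idr, as implemented above */
	return perf_pmu_register(&widget_pmu, "widget", -1);
}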
+ */ + if (attr->type == PERF_TYPE_BREAKPOINT) + event->hw.bp_target = task; +#endif + } + + if (!overflow_handler && parent_event) + overflow_handler = parent_event->overflow_handler; + + event->overflow_handler = overflow_handler; + + if (attr->disabled) + event->state = PERF_EVENT_STATE_OFF; + + pmu = NULL; + + hwc = &event->hw; + hwc->sample_period = attr->sample_period; + if (attr->freq && attr->sample_freq) + hwc->sample_period = 1; + hwc->last_period = hwc->sample_period; + + local64_set(&hwc->period_left, hwc->sample_period); + + /* + * we currently do not support PERF_FORMAT_GROUP on inherited events + */ + if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) + goto done; + + pmu = perf_init_event(event); + +done: + err = 0; + if (!pmu) + err = -EINVAL; + else if (IS_ERR(pmu)) + err = PTR_ERR(pmu); + + if (err) { + if (event->ns) + put_pid_ns(event->ns); + kfree(event); + return ERR_PTR(err); + } + + event->pmu = pmu; + + if (!event->parent) { + if (event->attach_state & PERF_ATTACH_TASK) + jump_label_inc(&perf_sched_events); + if (event->attr.mmap || event->attr.mmap_data) + atomic_inc(&nr_mmap_events); + if (event->attr.comm) + atomic_inc(&nr_comm_events); + if (event->attr.task) + atomic_inc(&nr_task_events); + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { + err = get_callchain_buffers(); + if (err) { + free_event(event); + return ERR_PTR(err); + } + } + } + + return event; +} + +static int perf_copy_attr(struct perf_event_attr __user *uattr, + struct perf_event_attr *attr) +{ + u32 size; + int ret; + + if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) + return -EFAULT; + + /* + * zero the full structure, so that a short copy will be nice. + */ + memset(attr, 0, sizeof(*attr)); + + ret = get_user(size, &uattr->size); + if (ret) + return ret; + + if (size > PAGE_SIZE) /* silly large */ + goto err_size; + + if (!size) /* abi compat */ + size = PERF_ATTR_SIZE_VER0; + + if (size < PERF_ATTR_SIZE_VER0) + goto err_size; + + /* + * If we're handed a bigger struct than we know of, + * ensure all the unknown bits are 0 - i.e. new + * user-space does not rely on any kernel feature + * extensions we dont know about yet. + */ + if (size > sizeof(*attr)) { + unsigned char __user *addr; + unsigned char __user *end; + unsigned char val; + + addr = (void __user *)uattr + sizeof(*attr); + end = (void __user *)uattr + size; + + for (; addr < end; addr++) { + ret = get_user(val, addr); + if (ret) + return ret; + if (val) + goto err_size; + } + size = sizeof(*attr); + } + + ret = copy_from_user(attr, uattr, size); + if (ret) + return -EFAULT; + + /* + * If the type exists, the corresponding creation will verify + * the attr->config. + */ + if (attr->type >= PERF_TYPE_MAX) + return -EINVAL; + + if (attr->__reserved_1) + return -EINVAL; + + if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) + return -EINVAL; + + if (attr->read_format & ~(PERF_FORMAT_MAX-1)) + return -EINVAL; + +out: + return ret; + +err_size: + put_user(sizeof(*attr), &uattr->size); + ret = -E2BIG; + goto out; +} + +static int +perf_event_set_output(struct perf_event *event, struct perf_event *output_event) +{ + struct perf_buffer *buffer = NULL, *old_buffer = NULL; + int ret = -EINVAL; + + if (!output_event) + goto set; + + /* don't allow circular references */ + if (event == output_event) + goto out; + + /* + * Don't allow cross-cpu buffers + */ + if (output_event->cpu != event->cpu) + goto out; + + /* + * If its not a per-cpu buffer, it must be the same task. 
+ */ + if (output_event->cpu == -1 && output_event->ctx != event->ctx) + goto out; + +set: + mutex_lock(&event->mmap_mutex); + /* Can't redirect output if we've got an active mmap() */ + if (atomic_read(&event->mmap_count)) + goto unlock; + + if (output_event) { + /* get the buffer we want to redirect to */ + buffer = perf_buffer_get(output_event); + if (!buffer) + goto unlock; + } + + old_buffer = event->buffer; + rcu_assign_pointer(event->buffer, buffer); + ret = 0; +unlock: + mutex_unlock(&event->mmap_mutex); + + if (old_buffer) + perf_buffer_put(old_buffer); +out: + return ret; +} + +/** + * sys_perf_event_open - open a performance event, associate it to a task/cpu + * + * @attr_uptr: event_id type attributes for monitoring/sampling + * @pid: target pid + * @cpu: target cpu + * @group_fd: group leader event fd + */ +SYSCALL_DEFINE5(perf_event_open, + struct perf_event_attr __user *, attr_uptr, + pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) +{ + struct perf_event *group_leader = NULL, *output_event = NULL; + struct perf_event *event, *sibling; + struct perf_event_attr attr; + struct perf_event_context *ctx; + struct file *event_file = NULL; + struct file *group_file = NULL; + struct task_struct *task = NULL; + struct pmu *pmu; + int event_fd; + int move_group = 0; + int fput_needed = 0; + int err; + + /* for future expandability... */ + if (flags & ~PERF_FLAG_ALL) + return -EINVAL; + + err = perf_copy_attr(attr_uptr, &attr); + if (err) + return err; + + if (!attr.exclude_kernel) { + if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) + return -EACCES; + } + + if (attr.freq) { + if (attr.sample_freq > sysctl_perf_event_sample_rate) + return -EINVAL; + } + + /* + * In cgroup mode, the pid argument is used to pass the fd + * opened to the cgroup directory in cgroupfs. The cpu argument + * designates the cpu on which to monitor threads from that + * cgroup. + */ + if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) + return -EINVAL; + + event_fd = get_unused_fd_flags(O_RDWR); + if (event_fd < 0) + return event_fd; + + if (group_fd != -1) { + group_leader = perf_fget_light(group_fd, &fput_needed); + if (IS_ERR(group_leader)) { + err = PTR_ERR(group_leader); + goto err_fd; + } + group_file = group_leader->filp; + if (flags & PERF_FLAG_FD_OUTPUT) + output_event = group_leader; + if (flags & PERF_FLAG_FD_NO_GROUP) + group_leader = NULL; + } + + if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { + task = find_lively_task_by_vpid(pid); + if (IS_ERR(task)) { + err = PTR_ERR(task); + goto err_group_fd; + } + } + + event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL); + if (IS_ERR(event)) { + err = PTR_ERR(event); + goto err_task; + } + + if (flags & PERF_FLAG_PID_CGROUP) { + err = perf_cgroup_connect(pid, event, &attr, group_leader); + if (err) + goto err_alloc; + /* + * one more event: + * - that has cgroup constraint on event->cpu + * - that may need work on context switch + */ + atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); + jump_label_inc(&perf_sched_events); + } + + /* + * Special case software events and allow them to be part of + * any hardware group. + */ + pmu = event->pmu; + + if (group_leader && + (is_software_event(event) != is_software_event(group_leader))) { + if (is_software_event(event)) { + /* + * If event and group_leader are not both a software + * event, and event is, then group leader is not. 
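[Editor's sketch, for illustration only, from the userspace side of the syscall defined above: a minimal program that counts retired instructions for the calling thread. Everything here is the regular perf_event_open ABI; the workload comment marks where the measured code would go.]

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);	/* ABI versioning, see perf_copy_attr() */
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* start the counter explicitly below */
	attr.exclude_kernel = 1;	/* don't count kernel-side work */

	/* pid = 0, cpu = -1: measure the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}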
+ * + * Allow the addition of software events to !software + * groups, this is safe because software events never + * fail to schedule. + */ + pmu = group_leader->pmu; + } else if (is_software_event(group_leader) && + (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { + /* + * In case the group is a pure software group, and we + * try to add a hardware event, move the whole group to + * the hardware context. + */ + move_group = 1; + } + } + + /* + * Get the target context (task or percpu): + */ + ctx = find_get_context(pmu, task, cpu); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto err_alloc; + } + + if (task) { + put_task_struct(task); + task = NULL; + } + + /* + * Look up the group leader (we will attach this event to it): + */ + if (group_leader) { + err = -EINVAL; + + /* + * Do not allow a recursive hierarchy (this new sibling + * becoming part of another group-sibling): + */ + if (group_leader->group_leader != group_leader) + goto err_context; + /* + * Do not allow to attach to a group in a different + * task or CPU context: + */ + if (move_group) { + if (group_leader->ctx->type != ctx->type) + goto err_context; + } else { + if (group_leader->ctx != ctx) + goto err_context; + } + + /* + * Only a group leader can be exclusive or pinned + */ + if (attr.exclusive || attr.pinned) + goto err_context; + } + + if (output_event) { + err = perf_event_set_output(event, output_event); + if (err) + goto err_context; + } + + event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); + if (IS_ERR(event_file)) { + err = PTR_ERR(event_file); + goto err_context; + } + + if (move_group) { + struct perf_event_context *gctx = group_leader->ctx; + + mutex_lock(&gctx->mutex); + perf_remove_from_context(group_leader); + list_for_each_entry(sibling, &group_leader->sibling_list, + group_entry) { + perf_remove_from_context(sibling); + put_ctx(gctx); + } + mutex_unlock(&gctx->mutex); + put_ctx(gctx); + } + + event->filp = event_file; + WARN_ON_ONCE(ctx->parent_ctx); + mutex_lock(&ctx->mutex); + + if (move_group) { + perf_install_in_context(ctx, group_leader, cpu); + get_ctx(ctx); + list_for_each_entry(sibling, &group_leader->sibling_list, + group_entry) { + perf_install_in_context(ctx, sibling, cpu); + get_ctx(ctx); + } + } + + perf_install_in_context(ctx, event, cpu); + ++ctx->generation; + perf_unpin_context(ctx); + mutex_unlock(&ctx->mutex); + + event->owner = current; + + mutex_lock(&current->perf_event_mutex); + list_add_tail(&event->owner_entry, &current->perf_event_list); + mutex_unlock(&current->perf_event_mutex); + + /* + * Precalculate sample_data sizes + */ + perf_event__header_size(event); + perf_event__id_header_size(event); + + /* + * Drop the reference on the group_event after placing the + * new event on the sibling_list. This ensures destruction + * of the group leader will find the pointer to itself in + * perf_group_detach(). 
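[Editor's sketch, for illustration only: the group handling above as seen by the caller. A hardware leader is opened with group_fd == -1 and a software sibling joins it via group_fd; per the special case in the code, the software event is allowed into the hardware group.]

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_event(struct perf_event_attr *attr, int group_fd)
{
	/* pid = 0: calling process, cpu = -1: any CPU */
	return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
}

int main(void)
{
	struct perf_event_attr hw, sw;
	int leader, sibling;

	memset(&hw, 0, sizeof(hw));
	hw.size = sizeof(hw);
	hw.type = PERF_TYPE_HARDWARE;
	hw.config = PERF_COUNT_HW_CPU_CYCLES;

	memset(&sw, 0, sizeof(sw));
	sw.size = sizeof(sw);
	sw.type = PERF_TYPE_SOFTWARE;
	sw.config = PERF_COUNT_SW_PAGE_FAULTS;

	leader = open_event(&hw, -1);		/* group_fd == -1: new group */
	sibling = open_event(&sw, leader);	/* joins the leader's group */

	/* both events are now scheduled onto the PMU as one unit */
	return (leader < 0 || sibling < 0);
}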
+ */ + fput_light(group_file, fput_needed); + fd_install(event_fd, event_file); + return event_fd; + +err_context: + perf_unpin_context(ctx); + put_ctx(ctx); +err_alloc: + free_event(event); +err_task: + if (task) + put_task_struct(task); +err_group_fd: + fput_light(group_file, fput_needed); +err_fd: + put_unused_fd(event_fd); + return err; +} + +/** + * perf_event_create_kernel_counter + * + * @attr: attributes of the counter to create + * @cpu: cpu in which the counter is bound + * @task: task to profile (NULL for percpu) + */ +struct perf_event * +perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, + struct task_struct *task, + perf_overflow_handler_t overflow_handler) +{ + struct perf_event_context *ctx; + struct perf_event *event; + int err; + + /* + * Get the target context (task or percpu): + */ + + event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler); + if (IS_ERR(event)) { + err = PTR_ERR(event); + goto err; + } + + ctx = find_get_context(event->pmu, task, cpu); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto err_free; + } + + event->filp = NULL; + WARN_ON_ONCE(ctx->parent_ctx); + mutex_lock(&ctx->mutex); + perf_install_in_context(ctx, event, cpu); + ++ctx->generation; + perf_unpin_context(ctx); + mutex_unlock(&ctx->mutex); + + return event; + +err_free: + free_event(event); +err: + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); + +static void sync_child_event(struct perf_event *child_event, + struct task_struct *child) +{ + struct perf_event *parent_event = child_event->parent; + u64 child_val; + + if (child_event->attr.inherit_stat) + perf_event_read_event(child_event, child); + + child_val = perf_event_count(child_event); + + /* + * Add back the child's count to the parent's count: + */ + atomic64_add(child_val, &parent_event->child_count); + atomic64_add(child_event->total_time_enabled, + &parent_event->child_total_time_enabled); + atomic64_add(child_event->total_time_running, + &parent_event->child_total_time_running); + + /* + * Remove this event from the parent's list + */ + WARN_ON_ONCE(parent_event->ctx->parent_ctx); + mutex_lock(&parent_event->child_mutex); + list_del_init(&child_event->child_list); + mutex_unlock(&parent_event->child_mutex); + + /* + * Release the parent event, if this was the last + * reference to it. + */ + fput(parent_event->filp); +} + +static void +__perf_event_exit_task(struct perf_event *child_event, + struct perf_event_context *child_ctx, + struct task_struct *child) +{ + if (child_event->parent) { + raw_spin_lock_irq(&child_ctx->lock); + perf_group_detach(child_event); + raw_spin_unlock_irq(&child_ctx->lock); + } + + perf_remove_from_context(child_event); + + /* + * It can happen that the parent exits first, and has events + * that are still around due to the child reference. These + * events need to be zapped. + */ + if (child_event->parent) { + sync_child_event(child_event, child); + free_event(child_event); + } +} + +static void perf_event_exit_task_context(struct task_struct *child, int ctxn) +{ + struct perf_event *child_event, *tmp; + struct perf_event_context *child_ctx; + unsigned long flags; + + if (likely(!child->perf_event_ctxp[ctxn])) { + perf_event_task(child, NULL, 0); + return; + } + + local_irq_save(flags); + /* + * We can't reschedule here because interrupts are disabled, + * and either child is current or it is a task that can't be + * scheduled, so we are now safe from rescheduling changing + * our context. 
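[Editor's sketch, for illustration only: a hedged example of the in-kernel counter API exported above, roughly how users such as the hw_breakpoint layer create events. The function and variable names are invented, and the eventual teardown via perf_event_release_kernel() is assumed from the wider API rather than shown in this hunk.]

#include <linux/err.h>
#include <linux/perf_event.h>

static struct perf_event *cycle_ev;	/* hypothetical per-cpu counter */

static int create_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(struct perf_event_attr),
		.pinned	= 1,
	};

	/* task == NULL binds the counter to @cpu instead of a task */
	cycle_ev = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL);
	if (IS_ERR(cycle_ev))
		return PTR_ERR(cycle_ev);
	return 0;
}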
+ */ + child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); + task_ctx_sched_out(child_ctx, EVENT_ALL); + + /* + * Take the context lock here so that if find_get_context is + * reading child->perf_event_ctxp, we wait until it has + * incremented the context's refcount before we do put_ctx below. + */ + raw_spin_lock(&child_ctx->lock); + child->perf_event_ctxp[ctxn] = NULL; + /* + * If this context is a clone; unclone it so it can't get + * swapped to another process while we're removing all + * the events from it. + */ + unclone_ctx(child_ctx); + update_context_time(child_ctx); + raw_spin_unlock_irqrestore(&child_ctx->lock, flags); + + /* + * Report the task dead after unscheduling the events so that we + * won't get any samples after PERF_RECORD_EXIT. We can however still + * get a few PERF_RECORD_READ events. + */ + perf_event_task(child, child_ctx, 0); + + /* + * We can recurse on the same lock type through: + * + * __perf_event_exit_task() + * sync_child_event() + * fput(parent_event->filp) + * perf_release() + * mutex_lock(&ctx->mutex) + * + * But since its the parent context it won't be the same instance. + */ + mutex_lock(&child_ctx->mutex); + +again: + list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups, + group_entry) + __perf_event_exit_task(child_event, child_ctx, child); + + list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups, + group_entry) + __perf_event_exit_task(child_event, child_ctx, child); + + /* + * If the last event was a group event, it will have appended all + * its siblings to the list, but we obtained 'tmp' before that which + * will still point to the list head terminating the iteration. + */ + if (!list_empty(&child_ctx->pinned_groups) || + !list_empty(&child_ctx->flexible_groups)) + goto again; + + mutex_unlock(&child_ctx->mutex); + + put_ctx(child_ctx); +} + +/* + * When a child task exits, feed back event values to parent events. + */ +void perf_event_exit_task(struct task_struct *child) +{ + struct perf_event *event, *tmp; + int ctxn; + + mutex_lock(&child->perf_event_mutex); + list_for_each_entry_safe(event, tmp, &child->perf_event_list, + owner_entry) { + list_del_init(&event->owner_entry); + + /* + * Ensure the list deletion is visible before we clear + * the owner, closes a race against perf_release() where + * we need to serialize on the owner->perf_event_mutex. + */ + smp_wmb(); + event->owner = NULL; + } + mutex_unlock(&child->perf_event_mutex); + + for_each_task_context_nr(ctxn) + perf_event_exit_task_context(child, ctxn); +} + +static void perf_free_event(struct perf_event *event, + struct perf_event_context *ctx) +{ + struct perf_event *parent = event->parent; + + if (WARN_ON_ONCE(!parent)) + return; + + mutex_lock(&parent->child_mutex); + list_del_init(&event->child_list); + mutex_unlock(&parent->child_mutex); + + fput(parent->filp); + + perf_group_detach(event); + list_del_event(event, ctx); + free_event(event); +} + +/* + * free an unexposed, unused context as created by inheritance by + * perf_event_init_task below, used by fork() in case of fail. 
+ */ +void perf_event_free_task(struct task_struct *task) +{ + struct perf_event_context *ctx; + struct perf_event *event, *tmp; + int ctxn; + + for_each_task_context_nr(ctxn) { + ctx = task->perf_event_ctxp[ctxn]; + if (!ctx) + continue; + + mutex_lock(&ctx->mutex); +again: + list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, + group_entry) + perf_free_event(event, ctx); + + list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, + group_entry) + perf_free_event(event, ctx); + + if (!list_empty(&ctx->pinned_groups) || + !list_empty(&ctx->flexible_groups)) + goto again; + + mutex_unlock(&ctx->mutex); + + put_ctx(ctx); + } +} + +void perf_event_delayed_put(struct task_struct *task) +{ + int ctxn; + + for_each_task_context_nr(ctxn) + WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); +} + +/* + * inherit a event from parent task to child task: + */ +static struct perf_event * +inherit_event(struct perf_event *parent_event, + struct task_struct *parent, + struct perf_event_context *parent_ctx, + struct task_struct *child, + struct perf_event *group_leader, + struct perf_event_context *child_ctx) +{ + struct perf_event *child_event; + unsigned long flags; + + /* + * Instead of creating recursive hierarchies of events, + * we link inherited events back to the original parent, + * which has a filp for sure, which we use as the reference + * count: + */ + if (parent_event->parent) + parent_event = parent_event->parent; + + child_event = perf_event_alloc(&parent_event->attr, + parent_event->cpu, + child, + group_leader, parent_event, + NULL); + if (IS_ERR(child_event)) + return child_event; + get_ctx(child_ctx); + + /* + * Make the child state follow the state of the parent event, + * not its attr.disabled bit. We hold the parent's mutex, + * so we won't race with perf_event_{en, dis}able_family. + */ + if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) + child_event->state = PERF_EVENT_STATE_INACTIVE; + else + child_event->state = PERF_EVENT_STATE_OFF; + + if (parent_event->attr.freq) { + u64 sample_period = parent_event->hw.sample_period; + struct hw_perf_event *hwc = &child_event->hw; + + hwc->sample_period = sample_period; + hwc->last_period = sample_period; + + local64_set(&hwc->period_left, sample_period); + } + + child_event->ctx = child_ctx; + child_event->overflow_handler = parent_event->overflow_handler; + + /* + * Precalculate sample_data sizes + */ + perf_event__header_size(child_event); + perf_event__id_header_size(child_event); + + /* + * Link it up in the child's context: + */ + raw_spin_lock_irqsave(&child_ctx->lock, flags); + add_event_to_ctx(child_event, child_ctx); + raw_spin_unlock_irqrestore(&child_ctx->lock, flags); + + /* + * Get a reference to the parent filp - we will fput it + * when the child event exits. 
This is safe to do because + * we are in the parent and we know that the filp still + * exists and has a nonzero count: + */ + atomic_long_inc(&parent_event->filp->f_count); + + /* + * Link this into the parent event's child list + */ + WARN_ON_ONCE(parent_event->ctx->parent_ctx); + mutex_lock(&parent_event->child_mutex); + list_add_tail(&child_event->child_list, &parent_event->child_list); + mutex_unlock(&parent_event->child_mutex); + + return child_event; +} + +static int inherit_group(struct perf_event *parent_event, + struct task_struct *parent, + struct perf_event_context *parent_ctx, + struct task_struct *child, + struct perf_event_context *child_ctx) +{ + struct perf_event *leader; + struct perf_event *sub; + struct perf_event *child_ctr; + + leader = inherit_event(parent_event, parent, parent_ctx, + child, NULL, child_ctx); + if (IS_ERR(leader)) + return PTR_ERR(leader); + list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { + child_ctr = inherit_event(sub, parent, parent_ctx, + child, leader, child_ctx); + if (IS_ERR(child_ctr)) + return PTR_ERR(child_ctr); + } + return 0; +} + +static int +inherit_task_group(struct perf_event *event, struct task_struct *parent, + struct perf_event_context *parent_ctx, + struct task_struct *child, int ctxn, + int *inherited_all) +{ + int ret; + struct perf_event_context *child_ctx; + + if (!event->attr.inherit) { + *inherited_all = 0; + return 0; + } + + child_ctx = child->perf_event_ctxp[ctxn]; + if (!child_ctx) { + /* + * This is executed from the parent task context, so + * inherit events that have been marked for cloning. + * First allocate and initialize a context for the + * child. + */ + + child_ctx = alloc_perf_context(event->pmu, child); + if (!child_ctx) + return -ENOMEM; + + child->perf_event_ctxp[ctxn] = child_ctx; + } + + ret = inherit_group(event, parent, parent_ctx, + child, child_ctx); + + if (ret) + *inherited_all = 0; + + return ret; +} + +/* + * Initialize the perf_event context in task_struct + */ +int perf_event_init_context(struct task_struct *child, int ctxn) +{ + struct perf_event_context *child_ctx, *parent_ctx; + struct perf_event_context *cloned_ctx; + struct perf_event *event; + struct task_struct *parent = current; + int inherited_all = 1; + unsigned long flags; + int ret = 0; + + if (likely(!parent->perf_event_ctxp[ctxn])) + return 0; + + /* + * If the parent's context is a clone, pin it so it won't get + * swapped under us. + */ + parent_ctx = perf_pin_task_context(parent, ctxn); + + /* + * No need to check if parent_ctx != NULL here; since we saw + * it non-NULL earlier, the only reason for it to become NULL + * is if we exit, and since we're currently in the middle of + * a fork we can't be exiting at the same time. + */ + + /* + * Lock the parent list. No need to lock the child - not PID + * hashed yet and not running, so nobody can access it. + */ + mutex_lock(&parent_ctx->mutex); + + /* + * We dont have to disable NMIs - we are only looking at + * the list, not manipulating it: + */ + list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { + ret = inherit_task_group(event, parent, parent_ctx, + child, ctxn, &inherited_all); + if (ret) + break; + } + + /* + * We can't hold ctx->lock when iterating the ->flexible_group list due + * to allocations, but we need to prevent rotation because + * rotate_ctx() will change the list from interrupt context. 
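[Editor's sketch, for illustration only: the inheritance path above as seen from userspace. With attr.inherit set, fork() gives the child a cloned event (inherit_event() above) and the child's count is folded back into the parent when it exits (sync_child_event() earlier).]

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.inherit = 1;		/* clone into children, see inherit_event() */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	if (fork() == 0) {		/* child runs with a cloned event */
		usleep(10000);
		_exit(0);
	}
	wait(NULL);			/* child exit folds its count back in */

	read(fd, &count, sizeof(count));
	printf("task clock, parent + child (ns): %lld\n", count);
	close(fd);
	return 0;
}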
+ */ + raw_spin_lock_irqsave(&parent_ctx->lock, flags); + parent_ctx->rotate_disable = 1; + raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); + + list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { + ret = inherit_task_group(event, parent, parent_ctx, + child, ctxn, &inherited_all); + if (ret) + break; + } + + raw_spin_lock_irqsave(&parent_ctx->lock, flags); + parent_ctx->rotate_disable = 0; + + child_ctx = child->perf_event_ctxp[ctxn]; + + if (child_ctx && inherited_all) { + /* + * Mark the child context as a clone of the parent + * context, or of whatever the parent is a clone of. + * + * Note that if the parent is a clone, the holding of + * parent_ctx->lock avoids it from being uncloned. + */ + cloned_ctx = parent_ctx->parent_ctx; + if (cloned_ctx) { + child_ctx->parent_ctx = cloned_ctx; + child_ctx->parent_gen = parent_ctx->parent_gen; + } else { + child_ctx->parent_ctx = parent_ctx; + child_ctx->parent_gen = parent_ctx->generation; + } + get_ctx(child_ctx->parent_ctx); + } + + raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); + mutex_unlock(&parent_ctx->mutex); + + perf_unpin_context(parent_ctx); + put_ctx(parent_ctx); + + return ret; +} + +/* + * Initialize the perf_event context in task_struct + */ +int perf_event_init_task(struct task_struct *child) +{ + int ctxn, ret; + + memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); + mutex_init(&child->perf_event_mutex); + INIT_LIST_HEAD(&child->perf_event_list); + + for_each_task_context_nr(ctxn) { + ret = perf_event_init_context(child, ctxn); + if (ret) + return ret; + } + + return 0; +} + +static void __init perf_event_init_all_cpus(void) +{ + struct swevent_htable *swhash; + int cpu; + + for_each_possible_cpu(cpu) { + swhash = &per_cpu(swevent_htable, cpu); + mutex_init(&swhash->hlist_mutex); + INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); + } +} + +static void __cpuinit perf_event_init_cpu(int cpu) +{ + struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); + + mutex_lock(&swhash->hlist_mutex); + if (swhash->hlist_refcount > 0) { + struct swevent_hlist *hlist; + + hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); + WARN_ON(!hlist); + rcu_assign_pointer(swhash->swevent_hlist, hlist); + } + mutex_unlock(&swhash->hlist_mutex); +} + +#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC +static void perf_pmu_rotate_stop(struct pmu *pmu) +{ + struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + + WARN_ON(!irqs_disabled()); + + list_del_init(&cpuctx->rotation_list); +} + +static void __perf_event_exit_context(void *__info) +{ + struct perf_event_context *ctx = __info; + struct perf_event *event, *tmp; + + perf_pmu_rotate_stop(ctx->pmu); + + list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) + __perf_remove_from_context(event); + list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) + __perf_remove_from_context(event); +} + +static void perf_event_exit_cpu_context(int cpu) +{ + struct perf_event_context *ctx; + struct pmu *pmu; + int idx; + + idx = srcu_read_lock(&pmus_srcu); + list_for_each_entry_rcu(pmu, &pmus, entry) { + ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; + + mutex_lock(&ctx->mutex); + smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); + mutex_unlock(&ctx->mutex); + } + srcu_read_unlock(&pmus_srcu, idx); +} + +static void perf_event_exit_cpu(int cpu) +{ + struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); + + mutex_lock(&swhash->hlist_mutex); + 
swevent_hlist_release(swhash); + mutex_unlock(&swhash->hlist_mutex); + + perf_event_exit_cpu_context(cpu); +} +#else +static inline void perf_event_exit_cpu(int cpu) { } +#endif + +static int +perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) +{ + int cpu; + + for_each_online_cpu(cpu) + perf_event_exit_cpu(cpu); + + return NOTIFY_OK; +} + +/* + * Run the perf reboot notifier at the very last possible moment so that + * the generic watchdog code runs as long as possible. + */ +static struct notifier_block perf_reboot_notifier = { + .notifier_call = perf_reboot, + .priority = INT_MIN, +}; + +static int __cpuinit +perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + + case CPU_UP_PREPARE: + case CPU_DOWN_FAILED: + perf_event_init_cpu(cpu); + break; + + case CPU_UP_CANCELED: + case CPU_DOWN_PREPARE: + perf_event_exit_cpu(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + +void __init perf_event_init(void) +{ + int ret; + + idr_init(&pmu_idr); + + perf_event_init_all_cpus(); + init_srcu_struct(&pmus_srcu); + perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); + perf_pmu_register(&perf_cpu_clock, NULL, -1); + perf_pmu_register(&perf_task_clock, NULL, -1); + perf_tp_register(); + perf_cpu_notifier(perf_cpu_notify); + register_reboot_notifier(&perf_reboot_notifier); + + ret = init_hw_breakpoint(); + WARN(ret, "hw_breakpoint initialization failed with: %d", ret); +} + +static int __init perf_event_sysfs_init(void) +{ + struct pmu *pmu; + int ret; + + mutex_lock(&pmus_lock); + + ret = bus_register(&pmu_bus); + if (ret) + goto unlock; + + list_for_each_entry(pmu, &pmus, entry) { + if (!pmu->name || pmu->type < 0) + continue; + + ret = pmu_dev_alloc(pmu); + WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); + } + pmu_bus_running = 1; + ret = 0; + +unlock: + mutex_unlock(&pmus_lock); + + return ret; +} +device_initcall(perf_event_sysfs_init); + +#ifdef CONFIG_CGROUP_PERF +static struct cgroup_subsys_state *perf_cgroup_create( + struct cgroup_subsys *ss, struct cgroup *cont) +{ + struct perf_cgroup *jc; + + jc = kzalloc(sizeof(*jc), GFP_KERNEL); + if (!jc) + return ERR_PTR(-ENOMEM); + + jc->info = alloc_percpu(struct perf_cgroup_info); + if (!jc->info) { + kfree(jc); + return ERR_PTR(-ENOMEM); + } + + return &jc->css; +} + +static void perf_cgroup_destroy(struct cgroup_subsys *ss, + struct cgroup *cont) +{ + struct perf_cgroup *jc; + jc = container_of(cgroup_subsys_state(cont, perf_subsys_id), + struct perf_cgroup, css); + free_percpu(jc->info); + kfree(jc); +} + +static int __perf_cgroup_move(void *info) +{ + struct task_struct *task = info; + perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); + return 0; +} + +static void perf_cgroup_move(struct task_struct *task) +{ + task_function_call(task, __perf_cgroup_move, task); +} + +static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, + struct cgroup *old_cgrp, struct task_struct *task, + bool threadgroup) +{ + perf_cgroup_move(task); + if (threadgroup) { + struct task_struct *c; + rcu_read_lock(); + list_for_each_entry_rcu(c, &task->thread_group, thread_group) { + perf_cgroup_move(c); + } + rcu_read_unlock(); + } +} + +static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, + struct cgroup *old_cgrp, struct task_struct *task) +{ + /* + * cgroup_exit() is called in the copy_process() failure path. 
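[Editor's sketch, for illustration only: what the event_source bus registered above looks like from userspace. Each named pmu (for example the "software" pmu registered in perf_event_init()) gets a device whose type attribute holds the id to place in perf_event_attr.type; the path below follows the standard sysfs bus layout.]

#include <stdio.h>

static int read_pmu_type(const char *pmu_name)
{
	char path[128];
	int type = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/type", pmu_name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1)
		type = -1;
	fclose(f);
	return type;	/* feed into perf_event_attr.type */
}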
+ * Ignore this case since the task hasn't ran yet, this avoids + * trying to poke a half freed task state from generic code. + */ + if (!(task->flags & PF_EXITING)) + return; + + perf_cgroup_move(task); +} + +struct cgroup_subsys perf_subsys = { + .name = "perf_event", + .subsys_id = perf_subsys_id, + .create = perf_cgroup_create, + .destroy = perf_cgroup_destroy, + .exit = perf_cgroup_exit, + .attach = perf_cgroup_attach, +}; +#endif /* CONFIG_CGROUP_PERF */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c deleted file mode 100644 index 440bc485bbf..00000000000 --- a/kernel/perf_event.c +++ /dev/null @@ -1,7455 +0,0 @@ -/* - * Performance events core code: - * - * Copyright (C) 2008 Thomas Gleixner - * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra - * Copyright © 2009 Paul Mackerras, IBM Corp. - * - * For licensing details see kernel-base/COPYING - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -struct remote_function_call { - struct task_struct *p; - int (*func)(void *info); - void *info; - int ret; -}; - -static void remote_function(void *data) -{ - struct remote_function_call *tfc = data; - struct task_struct *p = tfc->p; - - if (p) { - tfc->ret = -EAGAIN; - if (task_cpu(p) != smp_processor_id() || !task_curr(p)) - return; - } - - tfc->ret = tfc->func(tfc->info); -} - -/** - * task_function_call - call a function on the cpu on which a task runs - * @p: the task to evaluate - * @func: the function to be called - * @info: the function call argument - * - * Calls the function @func when the task is currently running. This might - * be on the current CPU, which just calls the function directly - * - * returns: @func return value, or - * -ESRCH - when the process isn't running - * -EAGAIN - when the process moved away - */ -static int -task_function_call(struct task_struct *p, int (*func) (void *info), void *info) -{ - struct remote_function_call data = { - .p = p, - .func = func, - .info = info, - .ret = -ESRCH, /* No such (running) process */ - }; - - if (task_curr(p)) - smp_call_function_single(task_cpu(p), remote_function, &data, 1); - - return data.ret; -} - -/** - * cpu_function_call - call a function on the cpu - * @func: the function to be called - * @info: the function call argument - * - * Calls the function @func on the remote cpu. 
- * - * returns: @func return value or -ENXIO when the cpu is offline - */ -static int cpu_function_call(int cpu, int (*func) (void *info), void *info) -{ - struct remote_function_call data = { - .p = NULL, - .func = func, - .info = info, - .ret = -ENXIO, /* No such CPU */ - }; - - smp_call_function_single(cpu, remote_function, &data, 1); - - return data.ret; -} - -#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ - PERF_FLAG_FD_OUTPUT |\ - PERF_FLAG_PID_CGROUP) - -enum event_type_t { - EVENT_FLEXIBLE = 0x1, - EVENT_PINNED = 0x2, - EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, -}; - -/* - * perf_sched_events : >0 events exist - * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu - */ -struct jump_label_key perf_sched_events __read_mostly; -static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); - -static atomic_t nr_mmap_events __read_mostly; -static atomic_t nr_comm_events __read_mostly; -static atomic_t nr_task_events __read_mostly; - -static LIST_HEAD(pmus); -static DEFINE_MUTEX(pmus_lock); -static struct srcu_struct pmus_srcu; - -/* - * perf event paranoia level: - * -1 - not paranoid at all - * 0 - disallow raw tracepoint access for unpriv - * 1 - disallow cpu events for unpriv - * 2 - disallow kernel profiling for unpriv - */ -int sysctl_perf_event_paranoid __read_mostly = 1; - -/* Minimum for 512 kiB + 1 user control page */ -int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ - -/* - * max perf event sample rate - */ -#define DEFAULT_MAX_SAMPLE_RATE 100000 -int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE; -static int max_samples_per_tick __read_mostly = - DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ); - -int perf_proc_update_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos) -{ - int ret = proc_dointvec(table, write, buffer, lenp, ppos); - - if (ret || !write) - return ret; - - max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); - - return 0; -} - -static atomic64_t perf_event_id; - -static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, - enum event_type_t event_type); - -static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, - enum event_type_t event_type, - struct task_struct *task); - -static void update_context_time(struct perf_event_context *ctx); -static u64 perf_event_time(struct perf_event *event); - -void __weak perf_event_print_debug(void) { } - -extern __weak const char *perf_pmu_name(void) -{ - return "pmu"; -} - -static inline u64 perf_clock(void) -{ - return local_clock(); -} - -static inline struct perf_cpu_context * -__get_cpu_context(struct perf_event_context *ctx) -{ - return this_cpu_ptr(ctx->pmu->pmu_cpu_context); -} - -#ifdef CONFIG_CGROUP_PERF - -/* - * Must ensure cgroup is pinned (css_get) before calling - * this function. In other words, we cannot call this function - * if there is no cgroup event for the current CPU context. 
- */ -static inline struct perf_cgroup * -perf_cgroup_from_task(struct task_struct *task) -{ - return container_of(task_subsys_state(task, perf_subsys_id), - struct perf_cgroup, css); -} - -static inline bool -perf_cgroup_match(struct perf_event *event) -{ - struct perf_event_context *ctx = event->ctx; - struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); - - return !event->cgrp || event->cgrp == cpuctx->cgrp; -} - -static inline void perf_get_cgroup(struct perf_event *event) -{ - css_get(&event->cgrp->css); -} - -static inline void perf_put_cgroup(struct perf_event *event) -{ - css_put(&event->cgrp->css); -} - -static inline void perf_detach_cgroup(struct perf_event *event) -{ - perf_put_cgroup(event); - event->cgrp = NULL; -} - -static inline int is_cgroup_event(struct perf_event *event) -{ - return event->cgrp != NULL; -} - -static inline u64 perf_cgroup_event_time(struct perf_event *event) -{ - struct perf_cgroup_info *t; - - t = per_cpu_ptr(event->cgrp->info, event->cpu); - return t->time; -} - -static inline void __update_cgrp_time(struct perf_cgroup *cgrp) -{ - struct perf_cgroup_info *info; - u64 now; - - now = perf_clock(); - - info = this_cpu_ptr(cgrp->info); - - info->time += now - info->timestamp; - info->timestamp = now; -} - -static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) -{ - struct perf_cgroup *cgrp_out = cpuctx->cgrp; - if (cgrp_out) - __update_cgrp_time(cgrp_out); -} - -static inline void update_cgrp_time_from_event(struct perf_event *event) -{ - struct perf_cgroup *cgrp; - - /* - * ensure we access cgroup data only when needed and - * when we know the cgroup is pinned (css_get) - */ - if (!is_cgroup_event(event)) - return; - - cgrp = perf_cgroup_from_task(current); - /* - * Do not update time when cgroup is not active - */ - if (cgrp == event->cgrp) - __update_cgrp_time(event->cgrp); -} - -static inline void -perf_cgroup_set_timestamp(struct task_struct *task, - struct perf_event_context *ctx) -{ - struct perf_cgroup *cgrp; - struct perf_cgroup_info *info; - - /* - * ctx->lock held by caller - * ensure we do not access cgroup data - * unless we have the cgroup pinned (css_get) - */ - if (!task || !ctx->nr_cgroups) - return; - - cgrp = perf_cgroup_from_task(task); - info = this_cpu_ptr(cgrp->info); - info->timestamp = ctx->timestamp; -} - -#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ -#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */ - -/* - * reschedule events based on the cgroup constraint of task. - * - * mode SWOUT : schedule out everything - * mode SWIN : schedule in based on cgroup for next - */ -void perf_cgroup_switch(struct task_struct *task, int mode) -{ - struct perf_cpu_context *cpuctx; - struct pmu *pmu; - unsigned long flags; - - /* - * disable interrupts to avoid geting nr_cgroup - * changes via __perf_event_disable(). Also - * avoids preemption. - */ - local_irq_save(flags); - - /* - * we reschedule only in the presence of cgroup - * constrained events. - */ - rcu_read_lock(); - - list_for_each_entry_rcu(pmu, &pmus, entry) { - - cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); - - perf_pmu_disable(cpuctx->ctx.pmu); - - /* - * perf_cgroup_events says at least one - * context on this CPU has cgroup events. - * - * ctx->nr_cgroups reports the number of cgroup - * events for a context. 
- */ - if (cpuctx->ctx.nr_cgroups > 0) { - - if (mode & PERF_CGROUP_SWOUT) { - cpu_ctx_sched_out(cpuctx, EVENT_ALL); - /* - * must not be done before ctxswout due - * to event_filter_match() in event_sched_out() - */ - cpuctx->cgrp = NULL; - } - - if (mode & PERF_CGROUP_SWIN) { - WARN_ON_ONCE(cpuctx->cgrp); - /* set cgrp before ctxsw in to - * allow event_filter_match() to not - * have to pass task around - */ - cpuctx->cgrp = perf_cgroup_from_task(task); - cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); - } - } - - perf_pmu_enable(cpuctx->ctx.pmu); - } - - rcu_read_unlock(); - - local_irq_restore(flags); -} - -static inline void perf_cgroup_sched_out(struct task_struct *task) -{ - perf_cgroup_switch(task, PERF_CGROUP_SWOUT); -} - -static inline void perf_cgroup_sched_in(struct task_struct *task) -{ - perf_cgroup_switch(task, PERF_CGROUP_SWIN); -} - -static inline int perf_cgroup_connect(int fd, struct perf_event *event, - struct perf_event_attr *attr, - struct perf_event *group_leader) -{ - struct perf_cgroup *cgrp; - struct cgroup_subsys_state *css; - struct file *file; - int ret = 0, fput_needed; - - file = fget_light(fd, &fput_needed); - if (!file) - return -EBADF; - - css = cgroup_css_from_dir(file, perf_subsys_id); - if (IS_ERR(css)) { - ret = PTR_ERR(css); - goto out; - } - - cgrp = container_of(css, struct perf_cgroup, css); - event->cgrp = cgrp; - - /* must be done before we fput() the file */ - perf_get_cgroup(event); - - /* - * all events in a group must monitor - * the same cgroup because a task belongs - * to only one perf cgroup at a time - */ - if (group_leader && group_leader->cgrp != cgrp) { - perf_detach_cgroup(event); - ret = -EINVAL; - } -out: - fput_light(file, fput_needed); - return ret; -} - -static inline void -perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) -{ - struct perf_cgroup_info *t; - t = per_cpu_ptr(event->cgrp->info, event->cpu); - event->shadow_ctx_time = now - t->timestamp; -} - -static inline void -perf_cgroup_defer_enabled(struct perf_event *event) -{ - /* - * when the current task's perf cgroup does not match - * the event's, we need to remember to call the - * perf_mark_enable() function the first time a task with - * a matching perf cgroup is scheduled in. 
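[Editor's sketch, for illustration only: cgroup mode as wired up by perf_cgroup_connect() above. The pid argument carries an open fd of the perf_event cgroup directory and cpu selects the CPU to monitor; the cgroupfs mount point used below is an assumption about the local setup.]

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int open_cgroup_event(int cpu)
{
	struct perf_event_attr attr;
	int cgrp_fd;

	/* assumed mount point of the perf_event cgroup controller */
	cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
	if (cgrp_fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid = cgroup dir fd; cpu must be >= 0 in cgroup mode */
	return syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu, -1,
		       PERF_FLAG_PID_CGROUP);
}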
- */ - if (is_cgroup_event(event) && !perf_cgroup_match(event)) - event->cgrp_defer_enabled = 1; -} - -static inline void -perf_cgroup_mark_enabled(struct perf_event *event, - struct perf_event_context *ctx) -{ - struct perf_event *sub; - u64 tstamp = perf_event_time(event); - - if (!event->cgrp_defer_enabled) - return; - - event->cgrp_defer_enabled = 0; - - event->tstamp_enabled = tstamp - event->total_time_enabled; - list_for_each_entry(sub, &event->sibling_list, group_entry) { - if (sub->state >= PERF_EVENT_STATE_INACTIVE) { - sub->tstamp_enabled = tstamp - sub->total_time_enabled; - sub->cgrp_defer_enabled = 0; - } - } -} -#else /* !CONFIG_CGROUP_PERF */ - -static inline bool -perf_cgroup_match(struct perf_event *event) -{ - return true; -} - -static inline void perf_detach_cgroup(struct perf_event *event) -{} - -static inline int is_cgroup_event(struct perf_event *event) -{ - return 0; -} - -static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) -{ - return 0; -} - -static inline void update_cgrp_time_from_event(struct perf_event *event) -{ -} - -static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) -{ -} - -static inline void perf_cgroup_sched_out(struct task_struct *task) -{ -} - -static inline void perf_cgroup_sched_in(struct task_struct *task) -{ -} - -static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, - struct perf_event_attr *attr, - struct perf_event *group_leader) -{ - return -EINVAL; -} - -static inline void -perf_cgroup_set_timestamp(struct task_struct *task, - struct perf_event_context *ctx) -{ -} - -void -perf_cgroup_switch(struct task_struct *task, struct task_struct *next) -{ -} - -static inline void -perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) -{ -} - -static inline u64 perf_cgroup_event_time(struct perf_event *event) -{ - return 0; -} - -static inline void -perf_cgroup_defer_enabled(struct perf_event *event) -{ -} - -static inline void -perf_cgroup_mark_enabled(struct perf_event *event, - struct perf_event_context *ctx) -{ -} -#endif - -void perf_pmu_disable(struct pmu *pmu) -{ - int *count = this_cpu_ptr(pmu->pmu_disable_count); - if (!(*count)++) - pmu->pmu_disable(pmu); -} - -void perf_pmu_enable(struct pmu *pmu) -{ - int *count = this_cpu_ptr(pmu->pmu_disable_count); - if (!--(*count)) - pmu->pmu_enable(pmu); -} - -static DEFINE_PER_CPU(struct list_head, rotation_list); - -/* - * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized - * because they're strictly cpu affine and rotate_start is called with IRQs - * disabled, while rotate_context is called from IRQ context. 
- */ -static void perf_pmu_rotate_start(struct pmu *pmu) -{ - struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); - struct list_head *head = &__get_cpu_var(rotation_list); - - WARN_ON(!irqs_disabled()); - - if (list_empty(&cpuctx->rotation_list)) - list_add(&cpuctx->rotation_list, head); -} - -static void get_ctx(struct perf_event_context *ctx) -{ - WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); -} - -static void free_ctx(struct rcu_head *head) -{ - struct perf_event_context *ctx; - - ctx = container_of(head, struct perf_event_context, rcu_head); - kfree(ctx); -} - -static void put_ctx(struct perf_event_context *ctx) -{ - if (atomic_dec_and_test(&ctx->refcount)) { - if (ctx->parent_ctx) - put_ctx(ctx->parent_ctx); - if (ctx->task) - put_task_struct(ctx->task); - call_rcu(&ctx->rcu_head, free_ctx); - } -} - -static void unclone_ctx(struct perf_event_context *ctx) -{ - if (ctx->parent_ctx) { - put_ctx(ctx->parent_ctx); - ctx->parent_ctx = NULL; - } -} - -static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) -{ - /* - * only top level events have the pid namespace they were created in - */ - if (event->parent) - event = event->parent; - - return task_tgid_nr_ns(p, event->ns); -} - -static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) -{ - /* - * only top level events have the pid namespace they were created in - */ - if (event->parent) - event = event->parent; - - return task_pid_nr_ns(p, event->ns); -} - -/* - * If we inherit events we want to return the parent event id - * to userspace. - */ -static u64 primary_event_id(struct perf_event *event) -{ - u64 id = event->id; - - if (event->parent) - id = event->parent->id; - - return id; -} - -/* - * Get the perf_event_context for a task and lock it. - * This has to cope with with the fact that until it is locked, - * the context could get moved to another task. - */ -static struct perf_event_context * -perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) -{ - struct perf_event_context *ctx; - - rcu_read_lock(); -retry: - ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); - if (ctx) { - /* - * If this context is a clone of another, it might - * get swapped for another underneath us by - * perf_event_task_sched_out, though the - * rcu_read_lock() protects us from any context - * getting freed. Lock the context and check if it - * got swapped before we could get the lock, and retry - * if so. If we locked the right context, then it - * can't get swapped on us any more. - */ - raw_spin_lock_irqsave(&ctx->lock, *flags); - if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { - raw_spin_unlock_irqrestore(&ctx->lock, *flags); - goto retry; - } - - if (!atomic_inc_not_zero(&ctx->refcount)) { - raw_spin_unlock_irqrestore(&ctx->lock, *flags); - ctx = NULL; - } - } - rcu_read_unlock(); - return ctx; -} - -/* - * Get the context for a task and increment its pin_count so it - * can't get swapped to another task. This also increments its - * reference count so that the context can't get freed. 
- */ -static struct perf_event_context * -perf_pin_task_context(struct task_struct *task, int ctxn) -{ - struct perf_event_context *ctx; - unsigned long flags; - - ctx = perf_lock_task_context(task, ctxn, &flags); - if (ctx) { - ++ctx->pin_count; - raw_spin_unlock_irqrestore(&ctx->lock, flags); - } - return ctx; -} - -static void perf_unpin_context(struct perf_event_context *ctx) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&ctx->lock, flags); - --ctx->pin_count; - raw_spin_unlock_irqrestore(&ctx->lock, flags); -} - -/* - * Update the record of the current time in a context. - */ -static void update_context_time(struct perf_event_context *ctx) -{ - u64 now = perf_clock(); - - ctx->time += now - ctx->timestamp; - ctx->timestamp = now; -} - -static u64 perf_event_time(struct perf_event *event) -{ - struct perf_event_context *ctx = event->ctx; - - if (is_cgroup_event(event)) - return perf_cgroup_event_time(event); - - return ctx ? ctx->time : 0; -} - -/* - * Update the total_time_enabled and total_time_running fields for a event. - */ -static void update_event_times(struct perf_event *event) -{ - struct perf_event_context *ctx = event->ctx; - u64 run_end; - - if (event->state < PERF_EVENT_STATE_INACTIVE || - event->group_leader->state < PERF_EVENT_STATE_INACTIVE) - return; - /* - * in cgroup mode, time_enabled represents - * the time the event was enabled AND active - * tasks were in the monitored cgroup. This is - * independent of the activity of the context as - * there may be a mix of cgroup and non-cgroup events. - * - * That is why we treat cgroup events differently - * here. - */ - if (is_cgroup_event(event)) - run_end = perf_event_time(event); - else if (ctx->is_active) - run_end = ctx->time; - else - run_end = event->tstamp_stopped; - - event->total_time_enabled = run_end - event->tstamp_enabled; - - if (event->state == PERF_EVENT_STATE_INACTIVE) - run_end = event->tstamp_stopped; - else - run_end = perf_event_time(event); - - event->total_time_running = run_end - event->tstamp_running; - -} - -/* - * Update total_time_enabled and total_time_running for all events in a group. - */ -static void update_group_times(struct perf_event *leader) -{ - struct perf_event *event; - - update_event_times(leader); - list_for_each_entry(event, &leader->sibling_list, group_entry) - update_event_times(event); -} - -static struct list_head * -ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) -{ - if (event->attr.pinned) - return &ctx->pinned_groups; - else - return &ctx->flexible_groups; -} - -/* - * Add a event from the lists for its context. - * Must be called with ctx->mutex and ctx->lock held. - */ -static void -list_add_event(struct perf_event *event, struct perf_event_context *ctx) -{ - WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); - event->attach_state |= PERF_ATTACH_CONTEXT; - - /* - * If we're a stand alone event or group leader, we go to the context - * list, group events are kept attached to the group so that - * perf_group_detach can, at all times, locate all siblings. 
- */ - if (event->group_leader == event) { - struct list_head *list; - - if (is_software_event(event)) - event->group_flags |= PERF_GROUP_SOFTWARE; - - list = ctx_group_list(event, ctx); - list_add_tail(&event->group_entry, list); - } - - if (is_cgroup_event(event)) - ctx->nr_cgroups++; - - list_add_rcu(&event->event_entry, &ctx->event_list); - if (!ctx->nr_events) - perf_pmu_rotate_start(ctx->pmu); - ctx->nr_events++; - if (event->attr.inherit_stat) - ctx->nr_stat++; -} - -/* - * Called at perf_event creation and when events are attached/detached from a - * group. - */ -static void perf_event__read_size(struct perf_event *event) -{ - int entry = sizeof(u64); /* value */ - int size = 0; - int nr = 1; - - if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) - size += sizeof(u64); - - if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) - size += sizeof(u64); - - if (event->attr.read_format & PERF_FORMAT_ID) - entry += sizeof(u64); - - if (event->attr.read_format & PERF_FORMAT_GROUP) { - nr += event->group_leader->nr_siblings; - size += sizeof(u64); - } - - size += entry * nr; - event->read_size = size; -} - -static void perf_event__header_size(struct perf_event *event) -{ - struct perf_sample_data *data; - u64 sample_type = event->attr.sample_type; - u16 size = 0; - - perf_event__read_size(event); - - if (sample_type & PERF_SAMPLE_IP) - size += sizeof(data->ip); - - if (sample_type & PERF_SAMPLE_ADDR) - size += sizeof(data->addr); - - if (sample_type & PERF_SAMPLE_PERIOD) - size += sizeof(data->period); - - if (sample_type & PERF_SAMPLE_READ) - size += event->read_size; - - event->header_size = size; -} - -static void perf_event__id_header_size(struct perf_event *event) -{ - struct perf_sample_data *data; - u64 sample_type = event->attr.sample_type; - u16 size = 0; - - if (sample_type & PERF_SAMPLE_TID) - size += sizeof(data->tid_entry); - - if (sample_type & PERF_SAMPLE_TIME) - size += sizeof(data->time); - - if (sample_type & PERF_SAMPLE_ID) - size += sizeof(data->id); - - if (sample_type & PERF_SAMPLE_STREAM_ID) - size += sizeof(data->stream_id); - - if (sample_type & PERF_SAMPLE_CPU) - size += sizeof(data->cpu_entry); - - event->id_header_size = size; -} - -static void perf_group_attach(struct perf_event *event) -{ - struct perf_event *group_leader = event->group_leader, *pos; - - /* - * We can have double attach due to group movement in perf_event_open. - */ - if (event->attach_state & PERF_ATTACH_GROUP) - return; - - event->attach_state |= PERF_ATTACH_GROUP; - - if (group_leader == event) - return; - - if (group_leader->group_flags & PERF_GROUP_SOFTWARE && - !is_software_event(event)) - group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; - - list_add_tail(&event->group_entry, &group_leader->sibling_list); - group_leader->nr_siblings++; - - perf_event__header_size(group_leader); - - list_for_each_entry(pos, &group_leader->sibling_list, group_entry) - perf_event__header_size(pos); -} - -/* - * Remove a event from the lists for its context. - * Must be called with ctx->mutex and ctx->lock held. - */ -static void -list_del_event(struct perf_event *event, struct perf_event_context *ctx) -{ - struct perf_cpu_context *cpuctx; - /* - * We can have double detach due to exit/hot-unplug + close. 
- */ - if (!(event->attach_state & PERF_ATTACH_CONTEXT)) - return; - - event->attach_state &= ~PERF_ATTACH_CONTEXT; - - if (is_cgroup_event(event)) { - ctx->nr_cgroups--; - cpuctx = __get_cpu_context(ctx); - /* - * if there are no more cgroup events - * then cler cgrp to avoid stale pointer - * in update_cgrp_time_from_cpuctx() - */ - if (!ctx->nr_cgroups) - cpuctx->cgrp = NULL; - } - - ctx->nr_events--; - if (event->attr.inherit_stat) - ctx->nr_stat--; - - list_del_rcu(&event->event_entry); - - if (event->group_leader == event) - list_del_init(&event->group_entry); - - update_group_times(event); - - /* - * If event was in error state, then keep it - * that way, otherwise bogus counts will be - * returned on read(). The only way to get out - * of error state is by explicit re-enabling - * of the event - */ - if (event->state > PERF_EVENT_STATE_OFF) - event->state = PERF_EVENT_STATE_OFF; -} - -static void perf_group_detach(struct perf_event *event) -{ - struct perf_event *sibling, *tmp; - struct list_head *list = NULL; - - /* - * We can have double detach due to exit/hot-unplug + close. - */ - if (!(event->attach_state & PERF_ATTACH_GROUP)) - return; - - event->attach_state &= ~PERF_ATTACH_GROUP; - - /* - * If this is a sibling, remove it from its group. - */ - if (event->group_leader != event) { - list_del_init(&event->group_entry); - event->group_leader->nr_siblings--; - goto out; - } - - if (!list_empty(&event->group_entry)) - list = &event->group_entry; - - /* - * If this was a group event with sibling events then - * upgrade the siblings to singleton events by adding them - * to whatever list we are on. - */ - list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { - if (list) - list_move_tail(&sibling->group_entry, list); - sibling->group_leader = sibling; - - /* Inherit group flags from the previous leader */ - sibling->group_flags = event->group_flags; - } - -out: - perf_event__header_size(event->group_leader); - - list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) - perf_event__header_size(tmp); -} - -static inline int -event_filter_match(struct perf_event *event) -{ - return (event->cpu == -1 || event->cpu == smp_processor_id()) - && perf_cgroup_match(event); -} - -static void -event_sched_out(struct perf_event *event, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - u64 tstamp = perf_event_time(event); - u64 delta; - /* - * An event which could not be activated because of - * filter mismatch still needs to have its timings - * maintained, otherwise bogus information is return - * via read() for time_enabled, time_running: - */ - if (event->state == PERF_EVENT_STATE_INACTIVE - && !event_filter_match(event)) { - delta = tstamp - event->tstamp_stopped; - event->tstamp_running += delta; - event->tstamp_stopped = tstamp; - } - - if (event->state != PERF_EVENT_STATE_ACTIVE) - return; - - event->state = PERF_EVENT_STATE_INACTIVE; - if (event->pending_disable) { - event->pending_disable = 0; - event->state = PERF_EVENT_STATE_OFF; - } - event->tstamp_stopped = tstamp; - event->pmu->del(event, 0); - event->oncpu = -1; - - if (!is_software_event(event)) - cpuctx->active_oncpu--; - ctx->nr_active--; - if (event->attr.exclusive || !cpuctx->active_oncpu) - cpuctx->exclusive = 0; -} - -static void -group_sched_out(struct perf_event *group_event, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - struct perf_event *event; - int state = group_event->state; - - event_sched_out(group_event, cpuctx, ctx); - - 
/* - * Schedule out siblings (if any): - */ - list_for_each_entry(event, &group_event->sibling_list, group_entry) - event_sched_out(event, cpuctx, ctx); - - if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) - cpuctx->exclusive = 0; -} - -/* - * Cross CPU call to remove a performance event - * - * We disable the event on the hardware level first. After that we - * remove it from the context list. - */ -static int __perf_remove_from_context(void *info) -{ - struct perf_event *event = info; - struct perf_event_context *ctx = event->ctx; - struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); - - raw_spin_lock(&ctx->lock); - event_sched_out(event, cpuctx, ctx); - list_del_event(event, ctx); - raw_spin_unlock(&ctx->lock); - - return 0; -} - - -/* - * Remove the event from a task's (or a CPU's) list of events. - * - * CPU events are removed with a smp call. For task events we only - * call when the task is on a CPU. - * - * If event->ctx is a cloned context, callers must make sure that - * every task struct that event->ctx->task could possibly point to - * remains valid. This is OK when called from perf_release since - * that only calls us on the top-level context, which can't be a clone. - * When called from perf_event_exit_task, it's OK because the - * context has been detached from its task. - */ -static void perf_remove_from_context(struct perf_event *event) -{ - struct perf_event_context *ctx = event->ctx; - struct task_struct *task = ctx->task; - - lockdep_assert_held(&ctx->mutex); - - if (!task) { - /* - * Per cpu events are removed via an smp call and - * the removal is always successful. - */ - cpu_function_call(event->cpu, __perf_remove_from_context, event); - return; - } - -retry: - if (!task_function_call(task, __perf_remove_from_context, event)) - return; - - raw_spin_lock_irq(&ctx->lock); - /* - * If we failed to find a running task, but find the context active now - * that we've acquired the ctx->lock, retry. - */ - if (ctx->is_active) { - raw_spin_unlock_irq(&ctx->lock); - goto retry; - } - - /* - * Since the task isn't running, its safe to remove the event, us - * holding the ctx->lock ensures the task won't get scheduled in. - */ - list_del_event(event, ctx); - raw_spin_unlock_irq(&ctx->lock); -} - -/* - * Cross CPU call to disable a performance event - */ -static int __perf_event_disable(void *info) -{ - struct perf_event *event = info; - struct perf_event_context *ctx = event->ctx; - struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); - - /* - * If this is a per-task event, need to check whether this - * event's task is the current task on this cpu. - * - * Can trigger due to concurrent perf_event_context_sched_out() - * flipping contexts around. - */ - if (ctx->task && cpuctx->task_ctx != ctx) - return -EINVAL; - - raw_spin_lock(&ctx->lock); - - /* - * If the event is on, turn it off. - * If it is in error state, leave it in error state. - */ - if (event->state >= PERF_EVENT_STATE_INACTIVE) { - update_context_time(ctx); - update_cgrp_time_from_event(event); - update_group_times(event); - if (event == event->group_leader) - group_sched_out(event, cpuctx, ctx); - else - event_sched_out(event, cpuctx, ctx); - event->state = PERF_EVENT_STATE_OFF; - } - - raw_spin_unlock(&ctx->lock); - - return 0; -} - -/* - * Disable a event. - * - * If event->ctx is a cloned context, callers must make sure that - * every task struct that event->ctx->task could possibly point to - * remains valid. 
This condition is satisifed when called through - * perf_event_for_each_child or perf_event_for_each because they - * hold the top-level event's child_mutex, so any descendant that - * goes to exit will block in sync_child_event. - * When called from perf_pending_event it's OK because event->ctx - * is the current context on this CPU and preemption is disabled, - * hence we can't get into perf_event_task_sched_out for this context. - */ -void perf_event_disable(struct perf_event *event) -{ - struct perf_event_context *ctx = event->ctx; - struct task_struct *task = ctx->task; - - if (!task) { - /* - * Disable the event on the cpu that it's on - */ - cpu_function_call(event->cpu, __perf_event_disable, event); - return; - } - -retry: - if (!task_function_call(task, __perf_event_disable, event)) - return; - - raw_spin_lock_irq(&ctx->lock); - /* - * If the event is still active, we need to retry the cross-call. - */ - if (event->state == PERF_EVENT_STATE_ACTIVE) { - raw_spin_unlock_irq(&ctx->lock); - /* - * Reload the task pointer, it might have been changed by - * a concurrent perf_event_context_sched_out(). - */ - task = ctx->task; - goto retry; - } - - /* - * Since we have the lock this context can't be scheduled - * in, so we can change the state safely. - */ - if (event->state == PERF_EVENT_STATE_INACTIVE) { - update_group_times(event); - event->state = PERF_EVENT_STATE_OFF; - } - raw_spin_unlock_irq(&ctx->lock); -} - -static void perf_set_shadow_time(struct perf_event *event, - struct perf_event_context *ctx, - u64 tstamp) -{ - /* - * use the correct time source for the time snapshot - * - * We could get by without this by leveraging the - * fact that to get to this function, the caller - * has most likely already called update_context_time() - * and update_cgrp_time_xx() and thus both timestamp - * are identical (or very close). Given that tstamp is, - * already adjusted for cgroup, we could say that: - * tstamp - ctx->timestamp - * is equivalent to - * tstamp - cgrp->timestamp. - * - * Then, in perf_output_read(), the calculation would - * work with no changes because: - * - event is guaranteed scheduled in - * - no scheduled out in between - * - thus the timestamp would be the same - * - * But this is a bit hairy. - * - * So instead, we have an explicit cgroup call to remain - * within the time time source all along. We believe it - * is cleaner and simpler to understand. - */ - if (is_cgroup_event(event)) - perf_cgroup_set_shadow_time(event, tstamp); - else - event->shadow_ctx_time = tstamp - ctx->timestamp; -} - -#define MAX_INTERRUPTS (~0ULL) - -static void perf_log_throttle(struct perf_event *event, int enable); - -static int -event_sched_in(struct perf_event *event, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - u64 tstamp = perf_event_time(event); - - if (event->state <= PERF_EVENT_STATE_OFF) - return 0; - - event->state = PERF_EVENT_STATE_ACTIVE; - event->oncpu = smp_processor_id(); - - /* - * Unthrottle events, since we scheduled we might have missed several - * ticks already, also for a heavily scheduling task there is little - * guarantee it'll get a tick in a timely manner. 
- */ - if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { - perf_log_throttle(event, 1); - event->hw.interrupts = 0; - } - - /* - * The new state must be visible before we turn it on in the hardware: - */ - smp_wmb(); - - if (event->pmu->add(event, PERF_EF_START)) { - event->state = PERF_EVENT_STATE_INACTIVE; - event->oncpu = -1; - return -EAGAIN; - } - - event->tstamp_running += tstamp - event->tstamp_stopped; - - perf_set_shadow_time(event, ctx, tstamp); - - if (!is_software_event(event)) - cpuctx->active_oncpu++; - ctx->nr_active++; - - if (event->attr.exclusive) - cpuctx->exclusive = 1; - - return 0; -} - -static int -group_sched_in(struct perf_event *group_event, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - struct perf_event *event, *partial_group = NULL; - struct pmu *pmu = group_event->pmu; - u64 now = ctx->time; - bool simulate = false; - - if (group_event->state == PERF_EVENT_STATE_OFF) - return 0; - - pmu->start_txn(pmu); - - if (event_sched_in(group_event, cpuctx, ctx)) { - pmu->cancel_txn(pmu); - return -EAGAIN; - } - - /* - * Schedule in siblings as one group (if any): - */ - list_for_each_entry(event, &group_event->sibling_list, group_entry) { - if (event_sched_in(event, cpuctx, ctx)) { - partial_group = event; - goto group_error; - } - } - - if (!pmu->commit_txn(pmu)) - return 0; - -group_error: - /* - * Groups can be scheduled in as one unit only, so undo any - * partial group before returning: - * The events up to the failed event are scheduled out normally, - * tstamp_stopped will be updated. - * - * The failed events and the remaining siblings need to have - * their timings updated as if they had gone thru event_sched_in() - * and event_sched_out(). This is required to get consistent timings - * across the group. This also takes care of the case where the group - * could never be scheduled by ensuring tstamp_stopped is set to mark - * the time the event was actually stopped, such that time delta - * calculation in update_event_times() is correct. - */ - list_for_each_entry(event, &group_event->sibling_list, group_entry) { - if (event == partial_group) - simulate = true; - - if (simulate) { - event->tstamp_running += now - event->tstamp_stopped; - event->tstamp_stopped = now; - } else { - event_sched_out(event, cpuctx, ctx); - } - } - event_sched_out(group_event, cpuctx, ctx); - - pmu->cancel_txn(pmu); - - return -EAGAIN; -} - -/* - * Work out whether we can put this event group on the CPU now. - */ -static int group_can_go_on(struct perf_event *event, - struct perf_cpu_context *cpuctx, - int can_add_hw) -{ - /* - * Groups consisting entirely of software events can always go on. - */ - if (event->group_flags & PERF_GROUP_SOFTWARE) - return 1; - /* - * If an exclusive group is already on, no other hardware - * events can go on. - */ - if (cpuctx->exclusive) - return 0; - /* - * If this group is exclusive and there are already - * events on the CPU, it can't go on. - */ - if (event->attr.exclusive && cpuctx->active_oncpu) - return 0; - /* - * Otherwise, try to add it if all previous groups were able - * to go on. 
- */ - return can_add_hw; -} - -static void add_event_to_ctx(struct perf_event *event, - struct perf_event_context *ctx) -{ - u64 tstamp = perf_event_time(event); - - list_add_event(event, ctx); - perf_group_attach(event); - event->tstamp_enabled = tstamp; - event->tstamp_running = tstamp; - event->tstamp_stopped = tstamp; -} - -static void perf_event_context_sched_in(struct perf_event_context *ctx, - struct task_struct *tsk); - -/* - * Cross CPU call to install and enable a performance event - * - * Must be called with ctx->mutex held - */ -static int __perf_install_in_context(void *info) -{ - struct perf_event *event = info; - struct perf_event_context *ctx = event->ctx; - struct perf_event *leader = event->group_leader; - struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); - int err; - - /* - * In case we're installing a new context to an already running task, - * could also happen before perf_event_task_sched_in() on architectures - * which do context switches with IRQs enabled. - */ - if (ctx->task && !cpuctx->task_ctx) - perf_event_context_sched_in(ctx, ctx->task); - - raw_spin_lock(&ctx->lock); - ctx->is_active = 1; - update_context_time(ctx); - /* - * update cgrp time only if current cgrp - * matches event->cgrp. Must be done before - * calling add_event_to_ctx() - */ - update_cgrp_time_from_event(event); - - add_event_to_ctx(event, ctx); - - if (!event_filter_match(event)) - goto unlock; - - /* - * Don't put the event on if it is disabled or if - * it is in a group and the group isn't on. - */ - if (event->state != PERF_EVENT_STATE_INACTIVE || - (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)) - goto unlock; - - /* - * An exclusive event can't go on if there are already active - * hardware events, and no hardware event can go on if there - * is already an exclusive event on. - */ - if (!group_can_go_on(event, cpuctx, 1)) - err = -EEXIST; - else - err = event_sched_in(event, cpuctx, ctx); - - if (err) { - /* - * This event couldn't go on. If it is in a group - * then we have to pull the whole group off. - * If the event group is pinned then put it in error state. - */ - if (leader != event) - group_sched_out(leader, cpuctx, ctx); - if (leader->attr.pinned) { - update_group_times(leader); - leader->state = PERF_EVENT_STATE_ERROR; - } - } - -unlock: - raw_spin_unlock(&ctx->lock); - - return 0; -} - -/* - * Attach a performance event to a context - * - * First we add the event to the list with the hardware enable bit - * in event->hw_config cleared. - * - * If the event is attached to a task which is on a CPU we use a smp - * call to enable it in the task context. The task might have been - * scheduled away, but we check this in the smp call again. - */ -static void -perf_install_in_context(struct perf_event_context *ctx, - struct perf_event *event, - int cpu) -{ - struct task_struct *task = ctx->task; - - lockdep_assert_held(&ctx->mutex); - - event->ctx = ctx; - - if (!task) { - /* - * Per cpu events are installed via an smp call and - * the install is always successful. - */ - cpu_function_call(cpu, __perf_install_in_context, event); - return; - } - -retry: - if (!task_function_call(task, __perf_install_in_context, event)) - return; - - raw_spin_lock_irq(&ctx->lock); - /* - * If we failed to find a running task, but find the context active now - * that we've acquired the ctx->lock, retry. 
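/*
 * A minimal userspace sketch of how the event groups handled by
 * group_sched_in()/group_can_go_on() above are created: the second
 * perf_event_open() passes the leader's fd as group_fd, so both
 * counters go onto the PMU (or stay off) as one unit.  This assumes
 * the perf_event_open(2) ABI of this era; the helper name
 * open_counter() is illustrative and error handling is trimmed.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>

static int open_counter(__u64 config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size	= sizeof(attr);
	attr.type	= PERF_TYPE_HARDWARE;
	attr.config	= config;
	attr.disabled	= (group_fd == -1);	/* only the leader starts disabled */

	return syscall(__NR_perf_event_open, &attr, 0 /* this task */,
		       -1 /* any cpu */, group_fd, 0);
}

int main(void)
{
	int leader  = open_counter(PERF_COUNT_HW_CPU_CYCLES, -1);
	int sibling = open_counter(PERF_COUNT_HW_INSTRUCTIONS, leader);

	/* Enabling the leader schedules the whole group in. */
	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

	close(sibling);
	close(leader);
	return 0;
}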
- */ - if (ctx->is_active) { - raw_spin_unlock_irq(&ctx->lock); - goto retry; - } - - /* - * Since the task isn't running, its safe to add the event, us holding - * the ctx->lock ensures the task won't get scheduled in. - */ - add_event_to_ctx(event, ctx); - raw_spin_unlock_irq(&ctx->lock); -} - -/* - * Put a event into inactive state and update time fields. - * Enabling the leader of a group effectively enables all - * the group members that aren't explicitly disabled, so we - * have to update their ->tstamp_enabled also. - * Note: this works for group members as well as group leaders - * since the non-leader members' sibling_lists will be empty. - */ -static void __perf_event_mark_enabled(struct perf_event *event, - struct perf_event_context *ctx) -{ - struct perf_event *sub; - u64 tstamp = perf_event_time(event); - - event->state = PERF_EVENT_STATE_INACTIVE; - event->tstamp_enabled = tstamp - event->total_time_enabled; - list_for_each_entry(sub, &event->sibling_list, group_entry) { - if (sub->state >= PERF_EVENT_STATE_INACTIVE) - sub->tstamp_enabled = tstamp - sub->total_time_enabled; - } -} - -/* - * Cross CPU call to enable a performance event - */ -static int __perf_event_enable(void *info) -{ - struct perf_event *event = info; - struct perf_event_context *ctx = event->ctx; - struct perf_event *leader = event->group_leader; - struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); - int err; - - if (WARN_ON_ONCE(!ctx->is_active)) - return -EINVAL; - - raw_spin_lock(&ctx->lock); - update_context_time(ctx); - - if (event->state >= PERF_EVENT_STATE_INACTIVE) - goto unlock; - - /* - * set current task's cgroup time reference point - */ - perf_cgroup_set_timestamp(current, ctx); - - __perf_event_mark_enabled(event, ctx); - - if (!event_filter_match(event)) { - if (is_cgroup_event(event)) - perf_cgroup_defer_enabled(event); - goto unlock; - } - - /* - * If the event is in a group and isn't the group leader, - * then don't put it on unless the group is on. - */ - if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) - goto unlock; - - if (!group_can_go_on(event, cpuctx, 1)) { - err = -EEXIST; - } else { - if (event == leader) - err = group_sched_in(event, cpuctx, ctx); - else - err = event_sched_in(event, cpuctx, ctx); - } - - if (err) { - /* - * If this event can't go on and it's part of a - * group, then the whole group has to come off. - */ - if (leader != event) - group_sched_out(leader, cpuctx, ctx); - if (leader->attr.pinned) { - update_group_times(leader); - leader->state = PERF_EVENT_STATE_ERROR; - } - } - -unlock: - raw_spin_unlock(&ctx->lock); - - return 0; -} - -/* - * Enable a event. - * - * If event->ctx is a cloned context, callers must make sure that - * every task struct that event->ctx->task could possibly point to - * remains valid. This condition is satisfied when called through - * perf_event_for_each_child or perf_event_for_each as described - * for perf_event_disable. - */ -void perf_event_enable(struct perf_event *event) -{ - struct perf_event_context *ctx = event->ctx; - struct task_struct *task = ctx->task; - - if (!task) { - /* - * Enable the event on the cpu that it's on - */ - cpu_function_call(event->cpu, __perf_event_enable, event); - return; - } - - raw_spin_lock_irq(&ctx->lock); - if (event->state >= PERF_EVENT_STATE_INACTIVE) - goto out; - - /* - * If the event is in error state, clear that first. 
- * That way, if we see the event in error state below, we - * know that it has gone back into error state, as distinct - * from the task having been scheduled away before the - * cross-call arrived. - */ - if (event->state == PERF_EVENT_STATE_ERROR) - event->state = PERF_EVENT_STATE_OFF; - -retry: - if (!ctx->is_active) { - __perf_event_mark_enabled(event, ctx); - goto out; - } - - raw_spin_unlock_irq(&ctx->lock); - - if (!task_function_call(task, __perf_event_enable, event)) - return; - - raw_spin_lock_irq(&ctx->lock); - - /* - * If the context is active and the event is still off, - * we need to retry the cross-call. - */ - if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { - /* - * task could have been flipped by a concurrent - * perf_event_context_sched_out() - */ - task = ctx->task; - goto retry; - } - -out: - raw_spin_unlock_irq(&ctx->lock); -} - -static int perf_event_refresh(struct perf_event *event, int refresh) -{ - /* - * not supported on inherited events - */ - if (event->attr.inherit || !is_sampling_event(event)) - return -EINVAL; - - atomic_add(refresh, &event->event_limit); - perf_event_enable(event); - - return 0; -} - -static void ctx_sched_out(struct perf_event_context *ctx, - struct perf_cpu_context *cpuctx, - enum event_type_t event_type) -{ - struct perf_event *event; - - raw_spin_lock(&ctx->lock); - perf_pmu_disable(ctx->pmu); - ctx->is_active = 0; - if (likely(!ctx->nr_events)) - goto out; - update_context_time(ctx); - update_cgrp_time_from_cpuctx(cpuctx); - - if (!ctx->nr_active) - goto out; - - if (event_type & EVENT_PINNED) { - list_for_each_entry(event, &ctx->pinned_groups, group_entry) - group_sched_out(event, cpuctx, ctx); - } - - if (event_type & EVENT_FLEXIBLE) { - list_for_each_entry(event, &ctx->flexible_groups, group_entry) - group_sched_out(event, cpuctx, ctx); - } -out: - perf_pmu_enable(ctx->pmu); - raw_spin_unlock(&ctx->lock); -} - -/* - * Test whether two contexts are equivalent, i.e. whether they - * have both been cloned from the same version of the same context - * and they both have the same number of enabled events. - * If the number of enabled events is the same, then the set - * of enabled events should be the same, because these are both - * inherited contexts, therefore we can't access individual events - * in them directly with an fd; we can only enable/disable all - * events via prctl, or enable/disable all events in a family - * via ioctl, which will have the same effect on both contexts. - */ -static int context_equiv(struct perf_event_context *ctx1, - struct perf_event_context *ctx2) -{ - return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx - && ctx1->parent_gen == ctx2->parent_gen - && !ctx1->pin_count && !ctx2->pin_count; -} - -static void __perf_event_sync_stat(struct perf_event *event, - struct perf_event *next_event) -{ - u64 value; - - if (!event->attr.inherit_stat) - return; - - /* - * Update the event value, we cannot use perf_event_read() - * because we're in the middle of a context switch and have IRQs - * disabled, which upsets smp_call_function_single(), however - * we know the event must be on the current CPU, therefore we - * don't need to use it. - */ - switch (event->state) { - case PERF_EVENT_STATE_ACTIVE: - event->pmu->read(event); - /* fall-through */ - - case PERF_EVENT_STATE_INACTIVE: - update_event_times(event); - break; - - default: - break; - } - - /* - * In order to keep per-task stats reliable we need to flip the event - * values when we flip the contexts. 
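/*
 * Sketch of the userspace side of perf_event_refresh() above:
 * PERF_EVENT_IOC_REFRESH re-enables a (currently disabled) sampling
 * event for a bounded number of overflows, after which the kernel
 * disables it again.  Assumes a sampling event set up elsewhere
 * (attr.sample_period != 0); the helper name is illustrative.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>

static void arm_for_n_overflows(int perf_fd, int n)
{
	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, n);
}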
- */ - value = local64_read(&next_event->count); - value = local64_xchg(&event->count, value); - local64_set(&next_event->count, value); - - swap(event->total_time_enabled, next_event->total_time_enabled); - swap(event->total_time_running, next_event->total_time_running); - - /* - * Since we swizzled the values, update the user visible data too. - */ - perf_event_update_userpage(event); - perf_event_update_userpage(next_event); -} - -#define list_next_entry(pos, member) \ - list_entry(pos->member.next, typeof(*pos), member) - -static void perf_event_sync_stat(struct perf_event_context *ctx, - struct perf_event_context *next_ctx) -{ - struct perf_event *event, *next_event; - - if (!ctx->nr_stat) - return; - - update_context_time(ctx); - - event = list_first_entry(&ctx->event_list, - struct perf_event, event_entry); - - next_event = list_first_entry(&next_ctx->event_list, - struct perf_event, event_entry); - - while (&event->event_entry != &ctx->event_list && - &next_event->event_entry != &next_ctx->event_list) { - - __perf_event_sync_stat(event, next_event); - - event = list_next_entry(event, event_entry); - next_event = list_next_entry(next_event, event_entry); - } -} - -static void perf_event_context_sched_out(struct task_struct *task, int ctxn, - struct task_struct *next) -{ - struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; - struct perf_event_context *next_ctx; - struct perf_event_context *parent; - struct perf_cpu_context *cpuctx; - int do_switch = 1; - - if (likely(!ctx)) - return; - - cpuctx = __get_cpu_context(ctx); - if (!cpuctx->task_ctx) - return; - - rcu_read_lock(); - parent = rcu_dereference(ctx->parent_ctx); - next_ctx = next->perf_event_ctxp[ctxn]; - if (parent && next_ctx && - rcu_dereference(next_ctx->parent_ctx) == parent) { - /* - * Looks like the two contexts are clones, so we might be - * able to optimize the context switch. We lock both - * contexts and check that they are clones under the - * lock (including re-checking that neither has been - * uncloned in the meantime). It doesn't matter which - * order we take the locks because no other cpu could - * be trying to lock both of these tasks. - */ - raw_spin_lock(&ctx->lock); - raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); - if (context_equiv(ctx, next_ctx)) { - /* - * XXX do we need a memory barrier of sorts - * wrt to rcu_dereference() of perf_event_ctxp - */ - task->perf_event_ctxp[ctxn] = next_ctx; - next->perf_event_ctxp[ctxn] = ctx; - ctx->task = next; - next_ctx->task = task; - do_switch = 0; - - perf_event_sync_stat(ctx, next_ctx); - } - raw_spin_unlock(&next_ctx->lock); - raw_spin_unlock(&ctx->lock); - } - rcu_read_unlock(); - - if (do_switch) { - ctx_sched_out(ctx, cpuctx, EVENT_ALL); - cpuctx->task_ctx = NULL; - } -} - -#define for_each_task_context_nr(ctxn) \ - for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) - -/* - * Called from scheduler to remove the events of the current task, - * with interrupts disabled. - * - * We stop each event and update the event value in event->count. - * - * This does not protect us against NMI, but disable() - * sets the disabled bit in the control field of event _before_ - * accessing the event control register. If a NMI hits, then it will - * not restart the event. 
- */ -void __perf_event_task_sched_out(struct task_struct *task, - struct task_struct *next) -{ - int ctxn; - - for_each_task_context_nr(ctxn) - perf_event_context_sched_out(task, ctxn, next); - - /* - * if cgroup events exist on this CPU, then we need - * to check if we have to switch out PMU state. - * cgroup event are system-wide mode only - */ - if (atomic_read(&__get_cpu_var(perf_cgroup_events))) - perf_cgroup_sched_out(task); -} - -static void task_ctx_sched_out(struct perf_event_context *ctx, - enum event_type_t event_type) -{ - struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); - - if (!cpuctx->task_ctx) - return; - - if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) - return; - - ctx_sched_out(ctx, cpuctx, event_type); - cpuctx->task_ctx = NULL; -} - -/* - * Called with IRQs disabled - */ -static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, - enum event_type_t event_type) -{ - ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); -} - -static void -ctx_pinned_sched_in(struct perf_event_context *ctx, - struct perf_cpu_context *cpuctx) -{ - struct perf_event *event; - - list_for_each_entry(event, &ctx->pinned_groups, group_entry) { - if (event->state <= PERF_EVENT_STATE_OFF) - continue; - if (!event_filter_match(event)) - continue; - - /* may need to reset tstamp_enabled */ - if (is_cgroup_event(event)) - perf_cgroup_mark_enabled(event, ctx); - - if (group_can_go_on(event, cpuctx, 1)) - group_sched_in(event, cpuctx, ctx); - - /* - * If this pinned group hasn't been scheduled, - * put it in error state. - */ - if (event->state == PERF_EVENT_STATE_INACTIVE) { - update_group_times(event); - event->state = PERF_EVENT_STATE_ERROR; - } - } -} - -static void -ctx_flexible_sched_in(struct perf_event_context *ctx, - struct perf_cpu_context *cpuctx) -{ - struct perf_event *event; - int can_add_hw = 1; - - list_for_each_entry(event, &ctx->flexible_groups, group_entry) { - /* Ignore events in OFF or ERROR state */ - if (event->state <= PERF_EVENT_STATE_OFF) - continue; - /* - * Listen to the 'cpu' scheduling filter constraint - * of events: - */ - if (!event_filter_match(event)) - continue; - - /* may need to reset tstamp_enabled */ - if (is_cgroup_event(event)) - perf_cgroup_mark_enabled(event, ctx); - - if (group_can_go_on(event, cpuctx, can_add_hw)) { - if (group_sched_in(event, cpuctx, ctx)) - can_add_hw = 0; - } - } -} - -static void -ctx_sched_in(struct perf_event_context *ctx, - struct perf_cpu_context *cpuctx, - enum event_type_t event_type, - struct task_struct *task) -{ - u64 now; - - raw_spin_lock(&ctx->lock); - ctx->is_active = 1; - if (likely(!ctx->nr_events)) - goto out; - - now = perf_clock(); - ctx->timestamp = now; - perf_cgroup_set_timestamp(task, ctx); - /* - * First go through the list and put on any pinned groups - * in order to give them the best chance of going on. 
- */ - if (event_type & EVENT_PINNED) - ctx_pinned_sched_in(ctx, cpuctx); - - /* Then walk through the lower prio flexible groups */ - if (event_type & EVENT_FLEXIBLE) - ctx_flexible_sched_in(ctx, cpuctx); - -out: - raw_spin_unlock(&ctx->lock); -} - -static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, - enum event_type_t event_type, - struct task_struct *task) -{ - struct perf_event_context *ctx = &cpuctx->ctx; - - ctx_sched_in(ctx, cpuctx, event_type, task); -} - -static void task_ctx_sched_in(struct perf_event_context *ctx, - enum event_type_t event_type) -{ - struct perf_cpu_context *cpuctx; - - cpuctx = __get_cpu_context(ctx); - if (cpuctx->task_ctx == ctx) - return; - - ctx_sched_in(ctx, cpuctx, event_type, NULL); - cpuctx->task_ctx = ctx; -} - -static void perf_event_context_sched_in(struct perf_event_context *ctx, - struct task_struct *task) -{ - struct perf_cpu_context *cpuctx; - - cpuctx = __get_cpu_context(ctx); - if (cpuctx->task_ctx == ctx) - return; - - perf_pmu_disable(ctx->pmu); - /* - * We want to keep the following priority order: - * cpu pinned (that don't need to move), task pinned, - * cpu flexible, task flexible. - */ - cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); - - ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); - cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); - ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); - - cpuctx->task_ctx = ctx; - - /* - * Since these rotations are per-cpu, we need to ensure the - * cpu-context we got scheduled on is actually rotating. - */ - perf_pmu_rotate_start(ctx->pmu); - perf_pmu_enable(ctx->pmu); -} - -/* - * Called from scheduler to add the events of the current task - * with interrupts disabled. - * - * We restore the event value and then enable it. - * - * This does not protect us against NMI, but enable() - * sets the enabled bit in the control field of event _before_ - * accessing the event control register. If a NMI hits, then it will - * keep the event running. - */ -void __perf_event_task_sched_in(struct task_struct *task) -{ - struct perf_event_context *ctx; - int ctxn; - - for_each_task_context_nr(ctxn) { - ctx = task->perf_event_ctxp[ctxn]; - if (likely(!ctx)) - continue; - - perf_event_context_sched_in(ctx, task); - } - /* - * if cgroup events exist on this CPU, then we need - * to check if we have to switch in PMU state. - * cgroup event are system-wide mode only - */ - if (atomic_read(&__get_cpu_var(perf_cgroup_events))) - perf_cgroup_sched_in(task); -} - -static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) -{ - u64 frequency = event->attr.sample_freq; - u64 sec = NSEC_PER_SEC; - u64 divisor, dividend; - - int count_fls, nsec_fls, frequency_fls, sec_fls; - - count_fls = fls64(count); - nsec_fls = fls64(nsec); - frequency_fls = fls64(frequency); - sec_fls = 30; - - /* - * We got @count in @nsec, with a target of sample_freq HZ - * the target period becomes: - * - * @count * 10^9 - * period = ------------------- - * @nsec * sample_freq - * - */ - - /* - * Reduce accuracy by one bit such that @a and @b converge - * to a similar magnitude. - */ -#define REDUCE_FLS(a, b) \ -do { \ - if (a##_fls > b##_fls) { \ - a >>= 1; \ - a##_fls--; \ - } else { \ - b >>= 1; \ - b##_fls--; \ - } \ -} while (0) - - /* - * Reduce accuracy until either term fits in a u64, then proceed with - * the other, so that finally we can do a u64/u64 division. 
- */ - while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { - REDUCE_FLS(nsec, frequency); - REDUCE_FLS(sec, count); - } - - if (count_fls + sec_fls > 64) { - divisor = nsec * frequency; - - while (count_fls + sec_fls > 64) { - REDUCE_FLS(count, sec); - divisor >>= 1; - } - - dividend = count * sec; - } else { - dividend = count * sec; - - while (nsec_fls + frequency_fls > 64) { - REDUCE_FLS(nsec, frequency); - dividend >>= 1; - } - - divisor = nsec * frequency; - } - - if (!divisor) - return dividend; - - return div64_u64(dividend, divisor); -} - -static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) -{ - struct hw_perf_event *hwc = &event->hw; - s64 period, sample_period; - s64 delta; - - period = perf_calculate_period(event, nsec, count); - - delta = (s64)(period - hwc->sample_period); - delta = (delta + 7) / 8; /* low pass filter */ - - sample_period = hwc->sample_period + delta; - - if (!sample_period) - sample_period = 1; - - hwc->sample_period = sample_period; - - if (local64_read(&hwc->period_left) > 8*sample_period) { - event->pmu->stop(event, PERF_EF_UPDATE); - local64_set(&hwc->period_left, 0); - event->pmu->start(event, PERF_EF_RELOAD); - } -} - -static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) -{ - struct perf_event *event; - struct hw_perf_event *hwc; - u64 interrupts, now; - s64 delta; - - raw_spin_lock(&ctx->lock); - list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { - if (event->state != PERF_EVENT_STATE_ACTIVE) - continue; - - if (!event_filter_match(event)) - continue; - - hwc = &event->hw; - - interrupts = hwc->interrupts; - hwc->interrupts = 0; - - /* - * unthrottle events on the tick - */ - if (interrupts == MAX_INTERRUPTS) { - perf_log_throttle(event, 1); - event->pmu->start(event, 0); - } - - if (!event->attr.freq || !event->attr.sample_freq) - continue; - - event->pmu->read(event); - now = local64_read(&event->count); - delta = now - hwc->freq_count_stamp; - hwc->freq_count_stamp = now; - - if (delta > 0) - perf_adjust_period(event, period, delta); - } - raw_spin_unlock(&ctx->lock); -} - -/* - * Round-robin a context's events: - */ -static void rotate_ctx(struct perf_event_context *ctx) -{ - raw_spin_lock(&ctx->lock); - - /* - * Rotate the first entry last of non-pinned groups. Rotation might be - * disabled by the inheritance code. - */ - if (!ctx->rotate_disable) - list_rotate_left(&ctx->flexible_groups); - - raw_spin_unlock(&ctx->lock); -} - -/* - * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized - * because they're strictly cpu affine and rotate_start is called with IRQs - * disabled, while rotate_context is called from IRQ context. 
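/*
 * The arithmetic that perf_calculate_period() approximates with the
 * REDUCE_FLS() shifting above -- period = count * 10^9 / (nsec *
 * sample_freq) -- written naively with a 128-bit intermediate
 * (GCC/Clang __int128).  Illustrative only: the kernel variant trades
 * precision for a plain u64/u64 division instead.
 */
#include <stdint.h>

static uint64_t naive_calculate_period(uint64_t count, uint64_t nsec,
				       uint64_t sample_freq)
{
	unsigned __int128 dividend = (unsigned __int128)count * 1000000000ULL;
	unsigned __int128 divisor  = (unsigned __int128)nsec * sample_freq;

	if (!divisor)
		return (uint64_t)dividend;

	return (uint64_t)(dividend / divisor);
}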
- */ -static void perf_rotate_context(struct perf_cpu_context *cpuctx) -{ - u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC; - struct perf_event_context *ctx = NULL; - int rotate = 0, remove = 1; - - if (cpuctx->ctx.nr_events) { - remove = 0; - if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) - rotate = 1; - } - - ctx = cpuctx->task_ctx; - if (ctx && ctx->nr_events) { - remove = 0; - if (ctx->nr_events != ctx->nr_active) - rotate = 1; - } - - perf_pmu_disable(cpuctx->ctx.pmu); - perf_ctx_adjust_freq(&cpuctx->ctx, interval); - if (ctx) - perf_ctx_adjust_freq(ctx, interval); - - if (!rotate) - goto done; - - cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); - if (ctx) - task_ctx_sched_out(ctx, EVENT_FLEXIBLE); - - rotate_ctx(&cpuctx->ctx); - if (ctx) - rotate_ctx(ctx); - - cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current); - if (ctx) - task_ctx_sched_in(ctx, EVENT_FLEXIBLE); - -done: - if (remove) - list_del_init(&cpuctx->rotation_list); - - perf_pmu_enable(cpuctx->ctx.pmu); -} - -void perf_event_task_tick(void) -{ - struct list_head *head = &__get_cpu_var(rotation_list); - struct perf_cpu_context *cpuctx, *tmp; - - WARN_ON(!irqs_disabled()); - - list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { - if (cpuctx->jiffies_interval == 1 || - !(jiffies % cpuctx->jiffies_interval)) - perf_rotate_context(cpuctx); - } -} - -static int event_enable_on_exec(struct perf_event *event, - struct perf_event_context *ctx) -{ - if (!event->attr.enable_on_exec) - return 0; - - event->attr.enable_on_exec = 0; - if (event->state >= PERF_EVENT_STATE_INACTIVE) - return 0; - - __perf_event_mark_enabled(event, ctx); - - return 1; -} - -/* - * Enable all of a task's events that have been marked enable-on-exec. - * This expects task == current. - */ -static void perf_event_enable_on_exec(struct perf_event_context *ctx) -{ - struct perf_event *event; - unsigned long flags; - int enabled = 0; - int ret; - - local_irq_save(flags); - if (!ctx || !ctx->nr_events) - goto out; - - /* - * We must ctxsw out cgroup events to avoid conflict - * when invoking perf_task_event_sched_in() later on - * in this function. Otherwise we end up trying to - * ctxswin cgroup events which are already scheduled - * in. - */ - perf_cgroup_sched_out(current); - task_ctx_sched_out(ctx, EVENT_ALL); - - raw_spin_lock(&ctx->lock); - - list_for_each_entry(event, &ctx->pinned_groups, group_entry) { - ret = event_enable_on_exec(event, ctx); - if (ret) - enabled = 1; - } - - list_for_each_entry(event, &ctx->flexible_groups, group_entry) { - ret = event_enable_on_exec(event, ctx); - if (ret) - enabled = 1; - } - - /* - * Unclone this context if we enabled any event. - */ - if (enabled) - unclone_ctx(ctx); - - raw_spin_unlock(&ctx->lock); - - /* - * Also calls ctxswin for cgroup events, if any: - */ - perf_event_context_sched_in(ctx, ctx->task); -out: - local_irq_restore(flags); -} - -/* - * Cross CPU call to read the hardware event - */ -static void __perf_event_read(void *info) -{ - struct perf_event *event = info; - struct perf_event_context *ctx = event->ctx; - struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); - - /* - * If this is a task context, we need to check whether it is - * the current task context of this cpu. If not it has been - * scheduled out before the smp call arrived. In that case - * event->count would have been updated to a recent sample - * when the event was scheduled out. 
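/*
 * Userspace counterpart of perf_event_enable_on_exec() above: a
 * counter opened disabled with attr.enable_on_exec set only starts
 * counting once the target calls exec(), so fork/exec overhead is not
 * measured.  A minimal sketch against the perf_event_open(2) ABI; the
 * helper name is illustrative and error handling is trimmed.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int open_for_exec(pid_t child)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size	    = sizeof(attr);
	attr.type	    = PERF_TYPE_HARDWARE;
	attr.config	    = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled	    = 1;	/* stay off until...         */
	attr.enable_on_exec = 1;	/* ...the child calls exec() */

	return syscall(__NR_perf_event_open, &attr, child, -1, -1, 0);
}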
- */ - if (ctx->task && cpuctx->task_ctx != ctx) - return; - - raw_spin_lock(&ctx->lock); - if (ctx->is_active) { - update_context_time(ctx); - update_cgrp_time_from_event(event); - } - update_event_times(event); - if (event->state == PERF_EVENT_STATE_ACTIVE) - event->pmu->read(event); - raw_spin_unlock(&ctx->lock); -} - -static inline u64 perf_event_count(struct perf_event *event) -{ - return local64_read(&event->count) + atomic64_read(&event->child_count); -} - -static u64 perf_event_read(struct perf_event *event) -{ - /* - * If event is enabled and currently active on a CPU, update the - * value in the event structure: - */ - if (event->state == PERF_EVENT_STATE_ACTIVE) { - smp_call_function_single(event->oncpu, - __perf_event_read, event, 1); - } else if (event->state == PERF_EVENT_STATE_INACTIVE) { - struct perf_event_context *ctx = event->ctx; - unsigned long flags; - - raw_spin_lock_irqsave(&ctx->lock, flags); - /* - * may read while context is not active - * (e.g., thread is blocked), in that case - * we cannot update context time - */ - if (ctx->is_active) { - update_context_time(ctx); - update_cgrp_time_from_event(event); - } - update_event_times(event); - raw_spin_unlock_irqrestore(&ctx->lock, flags); - } - - return perf_event_count(event); -} - -/* - * Callchain support - */ - -struct callchain_cpus_entries { - struct rcu_head rcu_head; - struct perf_callchain_entry *cpu_entries[0]; -}; - -static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); -static atomic_t nr_callchain_events; -static DEFINE_MUTEX(callchain_mutex); -struct callchain_cpus_entries *callchain_cpus_entries; - - -__weak void perf_callchain_kernel(struct perf_callchain_entry *entry, - struct pt_regs *regs) -{ -} - -__weak void perf_callchain_user(struct perf_callchain_entry *entry, - struct pt_regs *regs) -{ -} - -static void release_callchain_buffers_rcu(struct rcu_head *head) -{ - struct callchain_cpus_entries *entries; - int cpu; - - entries = container_of(head, struct callchain_cpus_entries, rcu_head); - - for_each_possible_cpu(cpu) - kfree(entries->cpu_entries[cpu]); - - kfree(entries); -} - -static void release_callchain_buffers(void) -{ - struct callchain_cpus_entries *entries; - - entries = callchain_cpus_entries; - rcu_assign_pointer(callchain_cpus_entries, NULL); - call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); -} - -static int alloc_callchain_buffers(void) -{ - int cpu; - int size; - struct callchain_cpus_entries *entries; - - /* - * We can't use the percpu allocation API for data that can be - * accessed from NMI. Use a temporary manual per cpu allocation - * until that gets sorted out. 
- */ - size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]); - - entries = kzalloc(size, GFP_KERNEL); - if (!entries) - return -ENOMEM; - - size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS; - - for_each_possible_cpu(cpu) { - entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, - cpu_to_node(cpu)); - if (!entries->cpu_entries[cpu]) - goto fail; - } - - rcu_assign_pointer(callchain_cpus_entries, entries); - - return 0; - -fail: - for_each_possible_cpu(cpu) - kfree(entries->cpu_entries[cpu]); - kfree(entries); - - return -ENOMEM; -} - -static int get_callchain_buffers(void) -{ - int err = 0; - int count; - - mutex_lock(&callchain_mutex); - - count = atomic_inc_return(&nr_callchain_events); - if (WARN_ON_ONCE(count < 1)) { - err = -EINVAL; - goto exit; - } - - if (count > 1) { - /* If the allocation failed, give up */ - if (!callchain_cpus_entries) - err = -ENOMEM; - goto exit; - } - - err = alloc_callchain_buffers(); - if (err) - release_callchain_buffers(); -exit: - mutex_unlock(&callchain_mutex); - - return err; -} - -static void put_callchain_buffers(void) -{ - if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) { - release_callchain_buffers(); - mutex_unlock(&callchain_mutex); - } -} - -static int get_recursion_context(int *recursion) -{ - int rctx; - - if (in_nmi()) - rctx = 3; - else if (in_irq()) - rctx = 2; - else if (in_softirq()) - rctx = 1; - else - rctx = 0; - - if (recursion[rctx]) - return -1; - - recursion[rctx]++; - barrier(); - - return rctx; -} - -static inline void put_recursion_context(int *recursion, int rctx) -{ - barrier(); - recursion[rctx]--; -} - -static struct perf_callchain_entry *get_callchain_entry(int *rctx) -{ - int cpu; - struct callchain_cpus_entries *entries; - - *rctx = get_recursion_context(__get_cpu_var(callchain_recursion)); - if (*rctx == -1) - return NULL; - - entries = rcu_dereference(callchain_cpus_entries); - if (!entries) - return NULL; - - cpu = smp_processor_id(); - - return &entries->cpu_entries[cpu][*rctx]; -} - -static void -put_callchain_entry(int rctx) -{ - put_recursion_context(__get_cpu_var(callchain_recursion), rctx); -} - -static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) -{ - int rctx; - struct perf_callchain_entry *entry; - - - entry = get_callchain_entry(&rctx); - if (rctx == -1) - return NULL; - - if (!entry) - goto exit_put; - - entry->nr = 0; - - if (!user_mode(regs)) { - perf_callchain_store(entry, PERF_CONTEXT_KERNEL); - perf_callchain_kernel(entry, regs); - if (current->mm) - regs = task_pt_regs(current); - else - regs = NULL; - } - - if (regs) { - perf_callchain_store(entry, PERF_CONTEXT_USER); - perf_callchain_user(entry, regs); - } - -exit_put: - put_callchain_entry(rctx); - - return entry; -} - -/* - * Initialize the perf_event context in a task_struct: - */ -static void __perf_event_init_context(struct perf_event_context *ctx) -{ - raw_spin_lock_init(&ctx->lock); - mutex_init(&ctx->mutex); - INIT_LIST_HEAD(&ctx->pinned_groups); - INIT_LIST_HEAD(&ctx->flexible_groups); - INIT_LIST_HEAD(&ctx->event_list); - atomic_set(&ctx->refcount, 1); -} - -static struct perf_event_context * -alloc_perf_context(struct pmu *pmu, struct task_struct *task) -{ - struct perf_event_context *ctx; - - ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); - if (!ctx) - return NULL; - - __perf_event_init_context(ctx); - if (task) { - ctx->task = task; - get_task_struct(task); - } - ctx->pmu = pmu; - - return ctx; -} - -static struct task_struct * 
-find_lively_task_by_vpid(pid_t vpid) -{ - struct task_struct *task; - int err; - - rcu_read_lock(); - if (!vpid) - task = current; - else - task = find_task_by_vpid(vpid); - if (task) - get_task_struct(task); - rcu_read_unlock(); - - if (!task) - return ERR_PTR(-ESRCH); - - /* Reuse ptrace permission checks for now. */ - err = -EACCES; - if (!ptrace_may_access(task, PTRACE_MODE_READ)) - goto errout; - - return task; -errout: - put_task_struct(task); - return ERR_PTR(err); - -} - -/* - * Returns a matching context with refcount and pincount. - */ -static struct perf_event_context * -find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) -{ - struct perf_event_context *ctx; - struct perf_cpu_context *cpuctx; - unsigned long flags; - int ctxn, err; - - if (!task) { - /* Must be root to operate on a CPU event: */ - if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) - return ERR_PTR(-EACCES); - - /* - * We could be clever and allow to attach a event to an - * offline CPU and activate it when the CPU comes up, but - * that's for later. - */ - if (!cpu_online(cpu)) - return ERR_PTR(-ENODEV); - - cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); - ctx = &cpuctx->ctx; - get_ctx(ctx); - ++ctx->pin_count; - - return ctx; - } - - err = -EINVAL; - ctxn = pmu->task_ctx_nr; - if (ctxn < 0) - goto errout; - -retry: - ctx = perf_lock_task_context(task, ctxn, &flags); - if (ctx) { - unclone_ctx(ctx); - ++ctx->pin_count; - raw_spin_unlock_irqrestore(&ctx->lock, flags); - } - - if (!ctx) { - ctx = alloc_perf_context(pmu, task); - err = -ENOMEM; - if (!ctx) - goto errout; - - get_ctx(ctx); - - err = 0; - mutex_lock(&task->perf_event_mutex); - /* - * If it has already passed perf_event_exit_task(). - * we must see PF_EXITING, it takes this mutex too. - */ - if (task->flags & PF_EXITING) - err = -ESRCH; - else if (task->perf_event_ctxp[ctxn]) - err = -EAGAIN; - else { - ++ctx->pin_count; - rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); - } - mutex_unlock(&task->perf_event_mutex); - - if (unlikely(err)) { - put_task_struct(task); - kfree(ctx); - - if (err == -EAGAIN) - goto retry; - goto errout; - } - } - - return ctx; - -errout: - return ERR_PTR(err); -} - -static void perf_event_free_filter(struct perf_event *event); - -static void free_event_rcu(struct rcu_head *head) -{ - struct perf_event *event; - - event = container_of(head, struct perf_event, rcu_head); - if (event->ns) - put_pid_ns(event->ns); - perf_event_free_filter(event); - kfree(event); -} - -static void perf_buffer_put(struct perf_buffer *buffer); - -static void free_event(struct perf_event *event) -{ - irq_work_sync(&event->pending); - - if (!event->parent) { - if (event->attach_state & PERF_ATTACH_TASK) - jump_label_dec(&perf_sched_events); - if (event->attr.mmap || event->attr.mmap_data) - atomic_dec(&nr_mmap_events); - if (event->attr.comm) - atomic_dec(&nr_comm_events); - if (event->attr.task) - atomic_dec(&nr_task_events); - if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) - put_callchain_buffers(); - if (is_cgroup_event(event)) { - atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); - jump_label_dec(&perf_sched_events); - } - } - - if (event->buffer) { - perf_buffer_put(event->buffer); - event->buffer = NULL; - } - - if (is_cgroup_event(event)) - perf_detach_cgroup(event); - - if (event->destroy) - event->destroy(event); - - if (event->ctx) - put_ctx(event->ctx); - - call_rcu(&event->rcu_head, free_event_rcu); -} - -int perf_event_release_kernel(struct perf_event *event) -{ - struct perf_event_context *ctx = 
event->ctx; - - /* - * Remove from the PMU, can't get re-enabled since we got - * here because the last ref went. - */ - perf_event_disable(event); - - WARN_ON_ONCE(ctx->parent_ctx); - /* - * There are two ways this annotation is useful: - * - * 1) there is a lock recursion from perf_event_exit_task - * see the comment there. - * - * 2) there is a lock-inversion with mmap_sem through - * perf_event_read_group(), which takes faults while - * holding ctx->mutex, however this is called after - * the last filedesc died, so there is no possibility - * to trigger the AB-BA case. - */ - mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); - raw_spin_lock_irq(&ctx->lock); - perf_group_detach(event); - list_del_event(event, ctx); - raw_spin_unlock_irq(&ctx->lock); - mutex_unlock(&ctx->mutex); - - free_event(event); - - return 0; -} -EXPORT_SYMBOL_GPL(perf_event_release_kernel); - -/* - * Called when the last reference to the file is gone. - */ -static int perf_release(struct inode *inode, struct file *file) -{ - struct perf_event *event = file->private_data; - struct task_struct *owner; - - file->private_data = NULL; - - rcu_read_lock(); - owner = ACCESS_ONCE(event->owner); - /* - * Matches the smp_wmb() in perf_event_exit_task(). If we observe - * !owner it means the list deletion is complete and we can indeed - * free this event, otherwise we need to serialize on - * owner->perf_event_mutex. - */ - smp_read_barrier_depends(); - if (owner) { - /* - * Since delayed_put_task_struct() also drops the last - * task reference we can safely take a new reference - * while holding the rcu_read_lock(). - */ - get_task_struct(owner); - } - rcu_read_unlock(); - - if (owner) { - mutex_lock(&owner->perf_event_mutex); - /* - * We have to re-check the event->owner field, if it is cleared - * we raced with perf_event_exit_task(), acquiring the mutex - * ensured they're done, and we can proceed with freeing the - * event. 
- */ - if (event->owner) - list_del_init(&event->owner_entry); - mutex_unlock(&owner->perf_event_mutex); - put_task_struct(owner); - } - - return perf_event_release_kernel(event); -} - -u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) -{ - struct perf_event *child; - u64 total = 0; - - *enabled = 0; - *running = 0; - - mutex_lock(&event->child_mutex); - total += perf_event_read(event); - *enabled += event->total_time_enabled + - atomic64_read(&event->child_total_time_enabled); - *running += event->total_time_running + - atomic64_read(&event->child_total_time_running); - - list_for_each_entry(child, &event->child_list, child_list) { - total += perf_event_read(child); - *enabled += child->total_time_enabled; - *running += child->total_time_running; - } - mutex_unlock(&event->child_mutex); - - return total; -} -EXPORT_SYMBOL_GPL(perf_event_read_value); - -static int perf_event_read_group(struct perf_event *event, - u64 read_format, char __user *buf) -{ - struct perf_event *leader = event->group_leader, *sub; - int n = 0, size = 0, ret = -EFAULT; - struct perf_event_context *ctx = leader->ctx; - u64 values[5]; - u64 count, enabled, running; - - mutex_lock(&ctx->mutex); - count = perf_event_read_value(leader, &enabled, &running); - - values[n++] = 1 + leader->nr_siblings; - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) - values[n++] = enabled; - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) - values[n++] = running; - values[n++] = count; - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_event_id(leader); - - size = n * sizeof(u64); - - if (copy_to_user(buf, values, size)) - goto unlock; - - ret = size; - - list_for_each_entry(sub, &leader->sibling_list, group_entry) { - n = 0; - - values[n++] = perf_event_read_value(sub, &enabled, &running); - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_event_id(sub); - - size = n * sizeof(u64); - - if (copy_to_user(buf + ret, values, size)) { - ret = -EFAULT; - goto unlock; - } - - ret += size; - } -unlock: - mutex_unlock(&ctx->mutex); - - return ret; -} - -static int perf_event_read_one(struct perf_event *event, - u64 read_format, char __user *buf) -{ - u64 enabled, running; - u64 values[4]; - int n = 0; - - values[n++] = perf_event_read_value(event, &enabled, &running); - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) - values[n++] = enabled; - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) - values[n++] = running; - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_event_id(event); - - if (copy_to_user(buf, values, n * sizeof(u64))) - return -EFAULT; - - return n * sizeof(u64); -} - -/* - * Read the performance event - simple non blocking version for now - */ -static ssize_t -perf_read_hw(struct perf_event *event, char __user *buf, size_t count) -{ - u64 read_format = event->attr.read_format; - int ret; - - /* - * Return end-of-file for a read on a event that is in - * error state (i.e. because it was pinned but it couldn't be - * scheduled on to the CPU at some point). 
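/*
 * Userspace view of the layout written by perf_event_read_one() above
 * for an event opened with read_format = PERF_FORMAT_TOTAL_TIME_ENABLED
 * | PERF_FORMAT_TOTAL_TIME_RUNNING: the enabled/running times let the
 * caller scale the count of a multiplexed event.  Sketch only; assumes
 * the fd was opened with exactly that read_format.
 */
#include <stdint.h>
#include <unistd.h>

struct read_one {
	uint64_t value;
	uint64_t time_enabled;
	uint64_t time_running;
};

static uint64_t read_scaled(int perf_fd)
{
	struct read_one r;

	if (read(perf_fd, &r, sizeof(r)) != sizeof(r) || !r.time_running)
		return 0;

	/* Estimate what the count would have been at 100% runtime. */
	return (uint64_t)((double)r.value *
			  (double)r.time_enabled / (double)r.time_running);
}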
- */ - if (event->state == PERF_EVENT_STATE_ERROR) - return 0; - - if (count < event->read_size) - return -ENOSPC; - - WARN_ON_ONCE(event->ctx->parent_ctx); - if (read_format & PERF_FORMAT_GROUP) - ret = perf_event_read_group(event, read_format, buf); - else - ret = perf_event_read_one(event, read_format, buf); - - return ret; -} - -static ssize_t -perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) -{ - struct perf_event *event = file->private_data; - - return perf_read_hw(event, buf, count); -} - -static unsigned int perf_poll(struct file *file, poll_table *wait) -{ - struct perf_event *event = file->private_data; - struct perf_buffer *buffer; - unsigned int events = POLL_HUP; - - rcu_read_lock(); - buffer = rcu_dereference(event->buffer); - if (buffer) - events = atomic_xchg(&buffer->poll, 0); - rcu_read_unlock(); - - poll_wait(file, &event->waitq, wait); - - return events; -} - -static void perf_event_reset(struct perf_event *event) -{ - (void)perf_event_read(event); - local64_set(&event->count, 0); - perf_event_update_userpage(event); -} - -/* - * Holding the top-level event's child_mutex means that any - * descendant process that has inherited this event will block - * in sync_child_event if it goes to exit, thus satisfying the - * task existence requirements of perf_event_enable/disable. - */ -static void perf_event_for_each_child(struct perf_event *event, - void (*func)(struct perf_event *)) -{ - struct perf_event *child; - - WARN_ON_ONCE(event->ctx->parent_ctx); - mutex_lock(&event->child_mutex); - func(event); - list_for_each_entry(child, &event->child_list, child_list) - func(child); - mutex_unlock(&event->child_mutex); -} - -static void perf_event_for_each(struct perf_event *event, - void (*func)(struct perf_event *)) -{ - struct perf_event_context *ctx = event->ctx; - struct perf_event *sibling; - - WARN_ON_ONCE(ctx->parent_ctx); - mutex_lock(&ctx->mutex); - event = event->group_leader; - - perf_event_for_each_child(event, func); - func(event); - list_for_each_entry(sibling, &event->sibling_list, group_entry) - perf_event_for_each_child(event, func); - mutex_unlock(&ctx->mutex); -} - -static int perf_event_period(struct perf_event *event, u64 __user *arg) -{ - struct perf_event_context *ctx = event->ctx; - int ret = 0; - u64 value; - - if (!is_sampling_event(event)) - return -EINVAL; - - if (copy_from_user(&value, arg, sizeof(value))) - return -EFAULT; - - if (!value) - return -EINVAL; - - raw_spin_lock_irq(&ctx->lock); - if (event->attr.freq) { - if (value > sysctl_perf_event_sample_rate) { - ret = -EINVAL; - goto unlock; - } - - event->attr.sample_freq = value; - } else { - event->attr.sample_period = value; - event->hw.sample_period = value; - } -unlock: - raw_spin_unlock_irq(&ctx->lock); - - return ret; -} - -static const struct file_operations perf_fops; - -static struct perf_event *perf_fget_light(int fd, int *fput_needed) -{ - struct file *file; - - file = fget_light(fd, fput_needed); - if (!file) - return ERR_PTR(-EBADF); - - if (file->f_op != &perf_fops) { - fput_light(file, *fput_needed); - *fput_needed = 0; - return ERR_PTR(-EBADF); - } - - return file->private_data; -} - -static int perf_event_set_output(struct perf_event *event, - struct perf_event *output_event); -static int perf_event_set_filter(struct perf_event *event, void __user *arg); - -static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct perf_event *event = file->private_data; - void (*func)(struct perf_event *); - u32 flags = arg; - - switch 
(cmd) { - case PERF_EVENT_IOC_ENABLE: - func = perf_event_enable; - break; - case PERF_EVENT_IOC_DISABLE: - func = perf_event_disable; - break; - case PERF_EVENT_IOC_RESET: - func = perf_event_reset; - break; - - case PERF_EVENT_IOC_REFRESH: - return perf_event_refresh(event, arg); - - case PERF_EVENT_IOC_PERIOD: - return perf_event_period(event, (u64 __user *)arg); - - case PERF_EVENT_IOC_SET_OUTPUT: - { - struct perf_event *output_event = NULL; - int fput_needed = 0; - int ret; - - if (arg != -1) { - output_event = perf_fget_light(arg, &fput_needed); - if (IS_ERR(output_event)) - return PTR_ERR(output_event); - } - - ret = perf_event_set_output(event, output_event); - if (output_event) - fput_light(output_event->filp, fput_needed); - - return ret; - } - - case PERF_EVENT_IOC_SET_FILTER: - return perf_event_set_filter(event, (void __user *)arg); - - default: - return -ENOTTY; - } - - if (flags & PERF_IOC_FLAG_GROUP) - perf_event_for_each(event, func); - else - perf_event_for_each_child(event, func); - - return 0; -} - -int perf_event_task_enable(void) -{ - struct perf_event *event; - - mutex_lock(¤t->perf_event_mutex); - list_for_each_entry(event, ¤t->perf_event_list, owner_entry) - perf_event_for_each_child(event, perf_event_enable); - mutex_unlock(¤t->perf_event_mutex); - - return 0; -} - -int perf_event_task_disable(void) -{ - struct perf_event *event; - - mutex_lock(¤t->perf_event_mutex); - list_for_each_entry(event, ¤t->perf_event_list, owner_entry) - perf_event_for_each_child(event, perf_event_disable); - mutex_unlock(¤t->perf_event_mutex); - - return 0; -} - -#ifndef PERF_EVENT_INDEX_OFFSET -# define PERF_EVENT_INDEX_OFFSET 0 -#endif - -static int perf_event_index(struct perf_event *event) -{ - if (event->hw.state & PERF_HES_STOPPED) - return 0; - - if (event->state != PERF_EVENT_STATE_ACTIVE) - return 0; - - return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; -} - -/* - * Callers need to ensure there can be no nesting of this function, otherwise - * the seqlock logic goes bad. We can not serialize this because the arch - * code calls this from NMI context. - */ -void perf_event_update_userpage(struct perf_event *event) -{ - struct perf_event_mmap_page *userpg; - struct perf_buffer *buffer; - - rcu_read_lock(); - buffer = rcu_dereference(event->buffer); - if (!buffer) - goto unlock; - - userpg = buffer->user_page; - - /* - * Disable preemption so as to not let the corresponding user-space - * spin too long if we get preempted. - */ - preempt_disable(); - ++userpg->lock; - barrier(); - userpg->index = perf_event_index(event); - userpg->offset = perf_event_count(event); - if (event->state == PERF_EVENT_STATE_ACTIVE) - userpg->offset -= local64_read(&event->hw.prev_count); - - userpg->time_enabled = event->total_time_enabled + - atomic64_read(&event->child_total_time_enabled); - - userpg->time_running = event->total_time_running + - atomic64_read(&event->child_total_time_running); - - barrier(); - ++userpg->lock; - preempt_enable(); -unlock: - rcu_read_unlock(); -} - -static unsigned long perf_data_size(struct perf_buffer *buffer); - -static void -perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags) -{ - long max_size = perf_data_size(buffer); - - if (watermark) - buffer->watermark = min(max_size, watermark); - - if (!buffer->watermark) - buffer->watermark = max_size / 2; - - if (flags & PERF_BUFFER_WRITABLE) - buffer->writable = 1; - - atomic_set(&buffer->refcount, 1); -} - -#ifndef CONFIG_PERF_USE_VMALLOC - -/* - * Back perf_mmap() with regular GFP_KERNEL-0 pages. 
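/*
 * Sketch of how userspace is expected to consume the page filled in by
 * perf_event_update_userpage() above, after mmap()ing the event fd:
 * ->lock is bumped before and after each update, so readers retry
 * until they observe the same value on both sides.  Field names follow
 * struct perf_event_mmap_page from <linux/perf_event.h>; the barrier
 * macro and helper name here are illustrative stand-ins.
 */
#include <linux/perf_event.h>
#include <stdint.h>

#define rmb()	__sync_synchronize()	/* stand-in for a read barrier */

static void snapshot_times(volatile struct perf_event_mmap_page *pc,
			   uint64_t *enabled, uint64_t *running)
{
	uint32_t seq;

	do {
		seq = pc->lock;
		rmb();
		*enabled = pc->time_enabled;
		*running = pc->time_running;
		/*
		 * pc->offset only becomes the full count once combined
		 * with a direct counter read while pc->index is
		 * non-zero; that part is omitted here.
		 */
		rmb();
	} while (pc->lock != seq);
}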
- */ - -static struct page * -perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) -{ - if (pgoff > buffer->nr_pages) - return NULL; - - if (pgoff == 0) - return virt_to_page(buffer->user_page); - - return virt_to_page(buffer->data_pages[pgoff - 1]); -} - -static void *perf_mmap_alloc_page(int cpu) -{ - struct page *page; - int node; - - node = (cpu == -1) ? cpu : cpu_to_node(cpu); - page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); - if (!page) - return NULL; - - return page_address(page); -} - -static struct perf_buffer * -perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) -{ - struct perf_buffer *buffer; - unsigned long size; - int i; - - size = sizeof(struct perf_buffer); - size += nr_pages * sizeof(void *); - - buffer = kzalloc(size, GFP_KERNEL); - if (!buffer) - goto fail; - - buffer->user_page = perf_mmap_alloc_page(cpu); - if (!buffer->user_page) - goto fail_user_page; - - for (i = 0; i < nr_pages; i++) { - buffer->data_pages[i] = perf_mmap_alloc_page(cpu); - if (!buffer->data_pages[i]) - goto fail_data_pages; - } - - buffer->nr_pages = nr_pages; - - perf_buffer_init(buffer, watermark, flags); - - return buffer; - -fail_data_pages: - for (i--; i >= 0; i--) - free_page((unsigned long)buffer->data_pages[i]); - - free_page((unsigned long)buffer->user_page); - -fail_user_page: - kfree(buffer); - -fail: - return NULL; -} - -static void perf_mmap_free_page(unsigned long addr) -{ - struct page *page = virt_to_page((void *)addr); - - page->mapping = NULL; - __free_page(page); -} - -static void perf_buffer_free(struct perf_buffer *buffer) -{ - int i; - - perf_mmap_free_page((unsigned long)buffer->user_page); - for (i = 0; i < buffer->nr_pages; i++) - perf_mmap_free_page((unsigned long)buffer->data_pages[i]); - kfree(buffer); -} - -static inline int page_order(struct perf_buffer *buffer) -{ - return 0; -} - -#else - -/* - * Back perf_mmap() with vmalloc memory. - * - * Required for architectures that have d-cache aliasing issues. 
- */ - -static inline int page_order(struct perf_buffer *buffer) -{ - return buffer->page_order; -} - -static struct page * -perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) -{ - if (pgoff > (1UL << page_order(buffer))) - return NULL; - - return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE); -} - -static void perf_mmap_unmark_page(void *addr) -{ - struct page *page = vmalloc_to_page(addr); - - page->mapping = NULL; -} - -static void perf_buffer_free_work(struct work_struct *work) -{ - struct perf_buffer *buffer; - void *base; - int i, nr; - - buffer = container_of(work, struct perf_buffer, work); - nr = 1 << page_order(buffer); - - base = buffer->user_page; - for (i = 0; i < nr + 1; i++) - perf_mmap_unmark_page(base + (i * PAGE_SIZE)); - - vfree(base); - kfree(buffer); -} - -static void perf_buffer_free(struct perf_buffer *buffer) -{ - schedule_work(&buffer->work); -} - -static struct perf_buffer * -perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) -{ - struct perf_buffer *buffer; - unsigned long size; - void *all_buf; - - size = sizeof(struct perf_buffer); - size += sizeof(void *); - - buffer = kzalloc(size, GFP_KERNEL); - if (!buffer) - goto fail; - - INIT_WORK(&buffer->work, perf_buffer_free_work); - - all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); - if (!all_buf) - goto fail_all_buf; - - buffer->user_page = all_buf; - buffer->data_pages[0] = all_buf + PAGE_SIZE; - buffer->page_order = ilog2(nr_pages); - buffer->nr_pages = 1; - - perf_buffer_init(buffer, watermark, flags); - - return buffer; - -fail_all_buf: - kfree(buffer); - -fail: - return NULL; -} - -#endif - -static unsigned long perf_data_size(struct perf_buffer *buffer) -{ - return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer)); -} - -static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -{ - struct perf_event *event = vma->vm_file->private_data; - struct perf_buffer *buffer; - int ret = VM_FAULT_SIGBUS; - - if (vmf->flags & FAULT_FLAG_MKWRITE) { - if (vmf->pgoff == 0) - ret = 0; - return ret; - } - - rcu_read_lock(); - buffer = rcu_dereference(event->buffer); - if (!buffer) - goto unlock; - - if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) - goto unlock; - - vmf->page = perf_mmap_to_page(buffer, vmf->pgoff); - if (!vmf->page) - goto unlock; - - get_page(vmf->page); - vmf->page->mapping = vma->vm_file->f_mapping; - vmf->page->index = vmf->pgoff; - - ret = 0; -unlock: - rcu_read_unlock(); - - return ret; -} - -static void perf_buffer_free_rcu(struct rcu_head *rcu_head) -{ - struct perf_buffer *buffer; - - buffer = container_of(rcu_head, struct perf_buffer, rcu_head); - perf_buffer_free(buffer); -} - -static struct perf_buffer *perf_buffer_get(struct perf_event *event) -{ - struct perf_buffer *buffer; - - rcu_read_lock(); - buffer = rcu_dereference(event->buffer); - if (buffer) { - if (!atomic_inc_not_zero(&buffer->refcount)) - buffer = NULL; - } - rcu_read_unlock(); - - return buffer; -} - -static void perf_buffer_put(struct perf_buffer *buffer) -{ - if (!atomic_dec_and_test(&buffer->refcount)) - return; - - call_rcu(&buffer->rcu_head, perf_buffer_free_rcu); -} - -static void perf_mmap_open(struct vm_area_struct *vma) -{ - struct perf_event *event = vma->vm_file->private_data; - - atomic_inc(&event->mmap_count); -} - -static void perf_mmap_close(struct vm_area_struct *vma) -{ - struct perf_event *event = vma->vm_file->private_data; - - if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { - unsigned long size = 
perf_data_size(event->buffer); - struct user_struct *user = event->mmap_user; - struct perf_buffer *buffer = event->buffer; - - atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); - vma->vm_mm->locked_vm -= event->mmap_locked; - rcu_assign_pointer(event->buffer, NULL); - mutex_unlock(&event->mmap_mutex); - - perf_buffer_put(buffer); - free_uid(user); - } -} - -static const struct vm_operations_struct perf_mmap_vmops = { - .open = perf_mmap_open, - .close = perf_mmap_close, - .fault = perf_mmap_fault, - .page_mkwrite = perf_mmap_fault, -}; - -static int perf_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct perf_event *event = file->private_data; - unsigned long user_locked, user_lock_limit; - struct user_struct *user = current_user(); - unsigned long locked, lock_limit; - struct perf_buffer *buffer; - unsigned long vma_size; - unsigned long nr_pages; - long user_extra, extra; - int ret = 0, flags = 0; - - /* - * Don't allow mmap() of inherited per-task counters. This would - * create a performance issue due to all children writing to the - * same buffer. - */ - if (event->cpu == -1 && event->attr.inherit) - return -EINVAL; - - if (!(vma->vm_flags & VM_SHARED)) - return -EINVAL; - - vma_size = vma->vm_end - vma->vm_start; - nr_pages = (vma_size / PAGE_SIZE) - 1; - - /* - * If we have buffer pages ensure they're a power-of-two number, so we - * can do bitmasks instead of modulo. - */ - if (nr_pages != 0 && !is_power_of_2(nr_pages)) - return -EINVAL; - - if (vma_size != PAGE_SIZE * (1 + nr_pages)) - return -EINVAL; - - if (vma->vm_pgoff != 0) - return -EINVAL; - - WARN_ON_ONCE(event->ctx->parent_ctx); - mutex_lock(&event->mmap_mutex); - if (event->buffer) { - if (event->buffer->nr_pages == nr_pages) - atomic_inc(&event->buffer->refcount); - else - ret = -EINVAL; - goto unlock; - } - - user_extra = nr_pages + 1; - user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); - - /* - * Increase the limit linearly with more CPUs: - */ - user_lock_limit *= num_online_cpus(); - - user_locked = atomic_long_read(&user->locked_vm) + user_extra; - - extra = 0; - if (user_locked > user_lock_limit) - extra = user_locked - user_lock_limit; - - lock_limit = rlimit(RLIMIT_MEMLOCK); - lock_limit >>= PAGE_SHIFT; - locked = vma->vm_mm->locked_vm + extra; - - if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && - !capable(CAP_IPC_LOCK)) { - ret = -EPERM; - goto unlock; - } - - WARN_ON(event->buffer); - - if (vma->vm_flags & VM_WRITE) - flags |= PERF_BUFFER_WRITABLE; - - buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark, - event->cpu, flags); - if (!buffer) { - ret = -ENOMEM; - goto unlock; - } - rcu_assign_pointer(event->buffer, buffer); - - atomic_long_add(user_extra, &user->locked_vm); - event->mmap_locked = extra; - event->mmap_user = get_current_user(); - vma->vm_mm->locked_vm += event->mmap_locked; - -unlock: - if (!ret) - atomic_inc(&event->mmap_count); - mutex_unlock(&event->mmap_mutex); - - vma->vm_flags |= VM_RESERVED; - vma->vm_ops = &perf_mmap_vmops; - - return ret; -} - -static int perf_fasync(int fd, struct file *filp, int on) -{ - struct inode *inode = filp->f_path.dentry->d_inode; - struct perf_event *event = filp->private_data; - int retval; - - mutex_lock(&inode->i_mutex); - retval = fasync_helper(fd, filp, on, &event->fasync); - mutex_unlock(&inode->i_mutex); - - if (retval < 0) - return retval; - - return 0; -} - -static const struct file_operations perf_fops = { - .llseek = no_llseek, - .release = perf_release, - .read = perf_read, - .poll 
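The sizing rule perf_mmap() enforces above is: one metadata page plus a power-of-two number of data pages, so ring offsets can be masked instead of taken modulo, with the whole mapping charged against the mlock limit. A small user-side sketch of that arithmetic; data_pages and the printed message are illustrative, not taken from the patch.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        unsigned long data_pages = 8;   /* caller's choice, must be 2^n */

        if (data_pages & (data_pages - 1)) {
                fprintf(stderr, "data page count must be a power of two\n");
                return 1;
        }
        /* one header page + data_pages: exactly the vma_size that
         * perf_mmap() accepts */
        printf("mmap length = %lu bytes\n",
               (1 + data_pages) * (unsigned long)page_size);
        return 0;
}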
= perf_poll, - .unlocked_ioctl = perf_ioctl, - .compat_ioctl = perf_ioctl, - .mmap = perf_mmap, - .fasync = perf_fasync, -}; - -/* - * Perf event wakeup - * - * If there's data, ensure we set the poll() state and publish everything - * to user-space before waking everybody up. - */ - -void perf_event_wakeup(struct perf_event *event) -{ - wake_up_all(&event->waitq); - - if (event->pending_kill) { - kill_fasync(&event->fasync, SIGIO, event->pending_kill); - event->pending_kill = 0; - } -} - -static void perf_pending_event(struct irq_work *entry) -{ - struct perf_event *event = container_of(entry, - struct perf_event, pending); - - if (event->pending_disable) { - event->pending_disable = 0; - __perf_event_disable(event); - } - - if (event->pending_wakeup) { - event->pending_wakeup = 0; - perf_event_wakeup(event); - } -} - -/* - * We assume there is only KVM supporting the callbacks. - * Later on, we might change it to a list if there is - * another virtualization implementation supporting the callbacks. - */ -struct perf_guest_info_callbacks *perf_guest_cbs; - -int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) -{ - perf_guest_cbs = cbs; - return 0; -} -EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); - -int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) -{ - perf_guest_cbs = NULL; - return 0; -} -EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); - -/* - * Output - */ -static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail, - unsigned long offset, unsigned long head) -{ - unsigned long mask; - - if (!buffer->writable) - return true; - - mask = perf_data_size(buffer) - 1; - - offset = (offset - tail) & mask; - head = (head - tail) & mask; - - if ((int)(head - offset) < 0) - return false; - - return true; -} - -static void perf_output_wakeup(struct perf_output_handle *handle) -{ - atomic_set(&handle->buffer->poll, POLL_IN); - - if (handle->nmi) { - handle->event->pending_wakeup = 1; - irq_work_queue(&handle->event->pending); - } else - perf_event_wakeup(handle->event); -} - -/* - * We need to ensure a later event_id doesn't publish a head when a former - * event isn't done writing. However since we need to deal with NMIs we - * cannot fully serialize things. - * - * We only publish the head (and generate a wakeup) when the outer-most - * event completes. - */ -static void perf_output_get_handle(struct perf_output_handle *handle) -{ - struct perf_buffer *buffer = handle->buffer; - - preempt_disable(); - local_inc(&buffer->nest); - handle->wakeup = local_read(&buffer->wakeup); -} - -static void perf_output_put_handle(struct perf_output_handle *handle) -{ - struct perf_buffer *buffer = handle->buffer; - unsigned long head; - -again: - head = local_read(&buffer->head); - - /* - * IRQ/NMI can happen here, which means we can miss a head update. - */ - - if (!local_dec_and_test(&buffer->nest)) - goto out; - - /* - * Publish the known good head. Rely on the full barrier implied - * by atomic_dec_and_test() order the buffer->head read and this - * write. - */ - buffer->user_page->data_head = head; - - /* - * Now check if we missed an update, rely on the (compiler) - * barrier in atomic_dec_and_test() to re-read buffer->head. 
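perf_output_space() above only matters for buffers mapped writable (non-writable buffers run in overwrite mode and always report space); the masking trick it uses is worth seeing in isolation. A stand-alone sketch, with ring_has_space() as an invented name and a 64-bit signed compare in place of the kernel's (int) cast:

#include <stdbool.h>
#include <stdint.h>

/* For a power-of-two ring of 'size' bytes: would advancing the write
 * position from 'offset' to 'head' overtake the reader at 'tail'?
 * Normalise both against tail, then a signed compare of the masked
 * distances answers the question. */
static bool ring_has_space(uint64_t tail, uint64_t offset, uint64_t head,
                           uint64_t size)
{
        uint64_t mask = size - 1;

        offset = (offset - tail) & mask;
        head   = (head   - tail) & mask;

        return (int64_t)(head - offset) >= 0;
}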
- */ - if (unlikely(head != local_read(&buffer->head))) { - local_inc(&buffer->nest); - goto again; - } - - if (handle->wakeup != local_read(&buffer->wakeup)) - perf_output_wakeup(handle); - -out: - preempt_enable(); -} - -__always_inline void perf_output_copy(struct perf_output_handle *handle, - const void *buf, unsigned int len) -{ - do { - unsigned long size = min_t(unsigned long, handle->size, len); - - memcpy(handle->addr, buf, size); - - len -= size; - handle->addr += size; - buf += size; - handle->size -= size; - if (!handle->size) { - struct perf_buffer *buffer = handle->buffer; - - handle->page++; - handle->page &= buffer->nr_pages - 1; - handle->addr = buffer->data_pages[handle->page]; - handle->size = PAGE_SIZE << page_order(buffer); - } - } while (len); -} - -static void __perf_event_header__init_id(struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_event *event) -{ - u64 sample_type = event->attr.sample_type; - - data->type = sample_type; - header->size += event->id_header_size; - - if (sample_type & PERF_SAMPLE_TID) { - /* namespace issues */ - data->tid_entry.pid = perf_event_pid(event, current); - data->tid_entry.tid = perf_event_tid(event, current); - } - - if (sample_type & PERF_SAMPLE_TIME) - data->time = perf_clock(); - - if (sample_type & PERF_SAMPLE_ID) - data->id = primary_event_id(event); - - if (sample_type & PERF_SAMPLE_STREAM_ID) - data->stream_id = event->id; - - if (sample_type & PERF_SAMPLE_CPU) { - data->cpu_entry.cpu = raw_smp_processor_id(); - data->cpu_entry.reserved = 0; - } -} - -static void perf_event_header__init_id(struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_event *event) -{ - if (event->attr.sample_id_all) - __perf_event_header__init_id(header, data, event); -} - -static void __perf_event__output_id_sample(struct perf_output_handle *handle, - struct perf_sample_data *data) -{ - u64 sample_type = data->type; - - if (sample_type & PERF_SAMPLE_TID) - perf_output_put(handle, data->tid_entry); - - if (sample_type & PERF_SAMPLE_TIME) - perf_output_put(handle, data->time); - - if (sample_type & PERF_SAMPLE_ID) - perf_output_put(handle, data->id); - - if (sample_type & PERF_SAMPLE_STREAM_ID) - perf_output_put(handle, data->stream_id); - - if (sample_type & PERF_SAMPLE_CPU) - perf_output_put(handle, data->cpu_entry); -} - -static void perf_event__output_id_sample(struct perf_event *event, - struct perf_output_handle *handle, - struct perf_sample_data *sample) -{ - if (event->attr.sample_id_all) - __perf_event__output_id_sample(handle, sample); -} - -int perf_output_begin(struct perf_output_handle *handle, - struct perf_event *event, unsigned int size, - int nmi, int sample) -{ - struct perf_buffer *buffer; - unsigned long tail, offset, head; - int have_lost; - struct perf_sample_data sample_data; - struct { - struct perf_event_header header; - u64 id; - u64 lost; - } lost_event; - - rcu_read_lock(); - /* - * For inherited events we send all the output towards the parent. 
- */ - if (event->parent) - event = event->parent; - - buffer = rcu_dereference(event->buffer); - if (!buffer) - goto out; - - handle->buffer = buffer; - handle->event = event; - handle->nmi = nmi; - handle->sample = sample; - - if (!buffer->nr_pages) - goto out; - - have_lost = local_read(&buffer->lost); - if (have_lost) { - lost_event.header.size = sizeof(lost_event); - perf_event_header__init_id(&lost_event.header, &sample_data, - event); - size += lost_event.header.size; - } - - perf_output_get_handle(handle); - - do { - /* - * Userspace could choose to issue a mb() before updating the - * tail pointer. So that all reads will be completed before the - * write is issued. - */ - tail = ACCESS_ONCE(buffer->user_page->data_tail); - smp_rmb(); - offset = head = local_read(&buffer->head); - head += size; - if (unlikely(!perf_output_space(buffer, tail, offset, head))) - goto fail; - } while (local_cmpxchg(&buffer->head, offset, head) != offset); - - if (head - local_read(&buffer->wakeup) > buffer->watermark) - local_add(buffer->watermark, &buffer->wakeup); - - handle->page = offset >> (PAGE_SHIFT + page_order(buffer)); - handle->page &= buffer->nr_pages - 1; - handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1); - handle->addr = buffer->data_pages[handle->page]; - handle->addr += handle->size; - handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size; - - if (have_lost) { - lost_event.header.type = PERF_RECORD_LOST; - lost_event.header.misc = 0; - lost_event.id = event->id; - lost_event.lost = local_xchg(&buffer->lost, 0); - - perf_output_put(handle, lost_event); - perf_event__output_id_sample(event, handle, &sample_data); - } - - return 0; - -fail: - local_inc(&buffer->lost); - perf_output_put_handle(handle); -out: - rcu_read_unlock(); - - return -ENOSPC; -} - -void perf_output_end(struct perf_output_handle *handle) -{ - struct perf_event *event = handle->event; - struct perf_buffer *buffer = handle->buffer; - - int wakeup_events = event->attr.wakeup_events; - - if (handle->sample && wakeup_events) { - int events = local_inc_return(&buffer->events); - if (events >= wakeup_events) { - local_sub(wakeup_events, &buffer->events); - local_inc(&buffer->wakeup); - } - } - - perf_output_put_handle(handle); - rcu_read_unlock(); -} - -static void perf_output_read_one(struct perf_output_handle *handle, - struct perf_event *event, - u64 enabled, u64 running) -{ - u64 read_format = event->attr.read_format; - u64 values[4]; - int n = 0; - - values[n++] = perf_event_count(event); - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { - values[n++] = enabled + - atomic64_read(&event->child_total_time_enabled); - } - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { - values[n++] = running + - atomic64_read(&event->child_total_time_running); - } - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_event_id(event); - - perf_output_copy(handle, values, n * sizeof(u64)); -} - -/* - * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 
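The reservation loop above (ACCESS_ONCE of data_tail, smp_rmb(), cmpxchg on the head) assumes a consumer that reads data_head first and publishes data_tail only after it has finished with the records, and that the ring was mapped read-write so the kernel honours data_tail at all. A rough consumer sketch; drain_ring(), the handle() callback and the no-wrap-around simplification are assumptions of this illustration:

#include <stddef.h>
#include <stdint.h>
#include <linux/perf_event.h>

/* 'data' points at the first data page of the mmap'ed ring,
 * 'data_size' is its power-of-two length.  Records that wrap the end
 * of the buffer are ignored here for brevity. */
static void drain_ring(struct perf_event_mmap_page *pc, unsigned char *data,
                       uint64_t data_size,
                       void (*handle)(const void *rec, size_t len))
{
        uint64_t head = pc->data_head;
        uint64_t tail = pc->data_tail;

        __sync_synchronize();           /* read head before the records */

        while (tail < head) {
                struct perf_event_header *hdr =
                        (void *)(data + (tail & (data_size - 1)));

                handle(hdr, hdr->size);
                tail += hdr->size;
        }

        __sync_synchronize();           /* finish reads before freeing space */
        pc->data_tail = tail;
}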
- */ -static void perf_output_read_group(struct perf_output_handle *handle, - struct perf_event *event, - u64 enabled, u64 running) -{ - struct perf_event *leader = event->group_leader, *sub; - u64 read_format = event->attr.read_format; - u64 values[5]; - int n = 0; - - values[n++] = 1 + leader->nr_siblings; - - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) - values[n++] = enabled; - - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) - values[n++] = running; - - if (leader != event) - leader->pmu->read(leader); - - values[n++] = perf_event_count(leader); - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_event_id(leader); - - perf_output_copy(handle, values, n * sizeof(u64)); - - list_for_each_entry(sub, &leader->sibling_list, group_entry) { - n = 0; - - if (sub != event) - sub->pmu->read(sub); - - values[n++] = perf_event_count(sub); - if (read_format & PERF_FORMAT_ID) - values[n++] = primary_event_id(sub); - - perf_output_copy(handle, values, n * sizeof(u64)); - } -} - -#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ - PERF_FORMAT_TOTAL_TIME_RUNNING) - -static void perf_output_read(struct perf_output_handle *handle, - struct perf_event *event) -{ - u64 enabled = 0, running = 0, now, ctx_time; - u64 read_format = event->attr.read_format; - - /* - * compute total_time_enabled, total_time_running - * based on snapshot values taken when the event - * was last scheduled in. - * - * we cannot simply called update_context_time() - * because of locking issue as we are called in - * NMI context - */ - if (read_format & PERF_FORMAT_TOTAL_TIMES) { - now = perf_clock(); - ctx_time = event->shadow_ctx_time + now; - enabled = ctx_time - event->tstamp_enabled; - running = ctx_time - event->tstamp_running; - } - - if (event->attr.read_format & PERF_FORMAT_GROUP) - perf_output_read_group(handle, event, enabled, running); - else - perf_output_read_one(handle, event, enabled, running); -} - -void perf_output_sample(struct perf_output_handle *handle, - struct perf_event_header *header, - struct perf_sample_data *data, - struct perf_event *event) -{ - u64 sample_type = data->type; - - perf_output_put(handle, *header); - - if (sample_type & PERF_SAMPLE_IP) - perf_output_put(handle, data->ip); - - if (sample_type & PERF_SAMPLE_TID) - perf_output_put(handle, data->tid_entry); - - if (sample_type & PERF_SAMPLE_TIME) - perf_output_put(handle, data->time); - - if (sample_type & PERF_SAMPLE_ADDR) - perf_output_put(handle, data->addr); - - if (sample_type & PERF_SAMPLE_ID) - perf_output_put(handle, data->id); - - if (sample_type & PERF_SAMPLE_STREAM_ID) - perf_output_put(handle, data->stream_id); - - if (sample_type & PERF_SAMPLE_CPU) - perf_output_put(handle, data->cpu_entry); - - if (sample_type & PERF_SAMPLE_PERIOD) - perf_output_put(handle, data->period); - - if (sample_type & PERF_SAMPLE_READ) - perf_output_read(handle, event); - - if (sample_type & PERF_SAMPLE_CALLCHAIN) { - if (data->callchain) { - int size = 1; - - if (data->callchain) - size += data->callchain->nr; - - size *= sizeof(u64); - - perf_output_copy(handle, data->callchain, size); - } else { - u64 nr = 0; - perf_output_put(handle, nr); - } - } - - if (sample_type & PERF_SAMPLE_RAW) { - if (data->raw) { - perf_output_put(handle, data->raw->size); - perf_output_copy(handle, data->raw->data, - data->raw->size); - } else { - struct { - u32 size; - u32 data; - } raw = { - .size = sizeof(u32), - .data = 0, - }; - perf_output_put(handle, raw); - } - } -} - -void perf_prepare_sample(struct perf_event_header *header, - 
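The values perf_output_read_one() and perf_output_read_group() emit above are plain u64s in flag order; read() and PERF_SAMPLE_READ hand user space the same layout. A decoding sketch for the non-group case; struct one_count and decode_read_one() are invented for the illustration:

#include <stddef.h>
#include <stdint.h>
#include <linux/perf_event.h>

struct one_count {
        uint64_t value, enabled, running, id;
};

/* Walk the u64s written by perf_output_read_one(): the counter value,
 * then the optional TOTAL_TIME_ENABLED, TOTAL_TIME_RUNNING and ID
 * fields, in that order.  Returns how many u64s were consumed. */
static size_t decode_read_one(const uint64_t *v, uint64_t read_format,
                              struct one_count *out)
{
        size_t n = 0;

        out->value = v[n++];
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                out->enabled = v[n++];
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                out->running = v[n++];
        if (read_format & PERF_FORMAT_ID)
                out->id = v[n++];
        return n;
}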
struct perf_sample_data *data, - struct perf_event *event, - struct pt_regs *regs) -{ - u64 sample_type = event->attr.sample_type; - - header->type = PERF_RECORD_SAMPLE; - header->size = sizeof(*header) + event->header_size; - - header->misc = 0; - header->misc |= perf_misc_flags(regs); - - __perf_event_header__init_id(header, data, event); - - if (sample_type & PERF_SAMPLE_IP) - data->ip = perf_instruction_pointer(regs); - - if (sample_type & PERF_SAMPLE_CALLCHAIN) { - int size = 1; - - data->callchain = perf_callchain(regs); - - if (data->callchain) - size += data->callchain->nr; - - header->size += size * sizeof(u64); - } - - if (sample_type & PERF_SAMPLE_RAW) { - int size = sizeof(u32); - - if (data->raw) - size += data->raw->size; - else - size += sizeof(u32); - - WARN_ON_ONCE(size & (sizeof(u64)-1)); - header->size += size; - } -} - -static void perf_event_output(struct perf_event *event, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct perf_output_handle handle; - struct perf_event_header header; - - /* protect the callchain buffers */ - rcu_read_lock(); - - perf_prepare_sample(&header, data, event, regs); - - if (perf_output_begin(&handle, event, header.size, nmi, 1)) - goto exit; - - perf_output_sample(&handle, &header, data, event); - - perf_output_end(&handle); - -exit: - rcu_read_unlock(); -} - -/* - * read event_id - */ - -struct perf_read_event { - struct perf_event_header header; - - u32 pid; - u32 tid; -}; - -static void -perf_event_read_event(struct perf_event *event, - struct task_struct *task) -{ - struct perf_output_handle handle; - struct perf_sample_data sample; - struct perf_read_event read_event = { - .header = { - .type = PERF_RECORD_READ, - .misc = 0, - .size = sizeof(read_event) + event->read_size, - }, - .pid = perf_event_pid(event, task), - .tid = perf_event_tid(event, task), - }; - int ret; - - perf_event_header__init_id(&read_event.header, &sample, event); - ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); - if (ret) - return; - - perf_output_put(&handle, read_event); - perf_output_read(&handle, event); - perf_event__output_id_sample(event, &handle, &sample); - - perf_output_end(&handle); -} - -/* - * task tracking -- fork/exit - * - * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task - */ - -struct perf_task_event { - struct task_struct *task; - struct perf_event_context *task_ctx; - - struct { - struct perf_event_header header; - - u32 pid; - u32 ppid; - u32 tid; - u32 ptid; - u64 time; - } event_id; -}; - -static void perf_event_task_output(struct perf_event *event, - struct perf_task_event *task_event) -{ - struct perf_output_handle handle; - struct perf_sample_data sample; - struct task_struct *task = task_event->task; - int ret, size = task_event->event_id.header.size; - - perf_event_header__init_id(&task_event->event_id.header, &sample, event); - - ret = perf_output_begin(&handle, event, - task_event->event_id.header.size, 0, 0); - if (ret) - goto out; - - task_event->event_id.pid = perf_event_pid(event, task); - task_event->event_id.ppid = perf_event_pid(event, current); - - task_event->event_id.tid = perf_event_tid(event, task); - task_event->event_id.ptid = perf_event_tid(event, current); - - perf_output_put(&handle, task_event->event_id); - - perf_event__output_id_sample(event, &handle, &sample); - - perf_output_end(&handle); -out: - task_event->event_id.header.size = size; -} - -static int perf_event_task_match(struct perf_event *event) -{ - if (event->state < 
PERF_EVENT_STATE_INACTIVE) - return 0; - - if (!event_filter_match(event)) - return 0; - - if (event->attr.comm || event->attr.mmap || - event->attr.mmap_data || event->attr.task) - return 1; - - return 0; -} - -static void perf_event_task_ctx(struct perf_event_context *ctx, - struct perf_task_event *task_event) -{ - struct perf_event *event; - - list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { - if (perf_event_task_match(event)) - perf_event_task_output(event, task_event); - } -} - -static void perf_event_task_event(struct perf_task_event *task_event) -{ - struct perf_cpu_context *cpuctx; - struct perf_event_context *ctx; - struct pmu *pmu; - int ctxn; - - rcu_read_lock(); - list_for_each_entry_rcu(pmu, &pmus, entry) { - cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); - if (cpuctx->active_pmu != pmu) - goto next; - perf_event_task_ctx(&cpuctx->ctx, task_event); - - ctx = task_event->task_ctx; - if (!ctx) { - ctxn = pmu->task_ctx_nr; - if (ctxn < 0) - goto next; - ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); - } - if (ctx) - perf_event_task_ctx(ctx, task_event); -next: - put_cpu_ptr(pmu->pmu_cpu_context); - } - rcu_read_unlock(); -} - -static void perf_event_task(struct task_struct *task, - struct perf_event_context *task_ctx, - int new) -{ - struct perf_task_event task_event; - - if (!atomic_read(&nr_comm_events) && - !atomic_read(&nr_mmap_events) && - !atomic_read(&nr_task_events)) - return; - - task_event = (struct perf_task_event){ - .task = task, - .task_ctx = task_ctx, - .event_id = { - .header = { - .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, - .misc = 0, - .size = sizeof(task_event.event_id), - }, - /* .pid */ - /* .ppid */ - /* .tid */ - /* .ptid */ - .time = perf_clock(), - }, - }; - - perf_event_task_event(&task_event); -} - -void perf_event_fork(struct task_struct *task) -{ - perf_event_task(task, NULL, 1); -} - -/* - * comm tracking - */ - -struct perf_comm_event { - struct task_struct *task; - char *comm; - int comm_size; - - struct { - struct perf_event_header header; - - u32 pid; - u32 tid; - } event_id; -}; - -static void perf_event_comm_output(struct perf_event *event, - struct perf_comm_event *comm_event) -{ - struct perf_output_handle handle; - struct perf_sample_data sample; - int size = comm_event->event_id.header.size; - int ret; - - perf_event_header__init_id(&comm_event->event_id.header, &sample, event); - ret = perf_output_begin(&handle, event, - comm_event->event_id.header.size, 0, 0); - - if (ret) - goto out; - - comm_event->event_id.pid = perf_event_pid(event, comm_event->task); - comm_event->event_id.tid = perf_event_tid(event, comm_event->task); - - perf_output_put(&handle, comm_event->event_id); - perf_output_copy(&handle, comm_event->comm, - comm_event->comm_size); - - perf_event__output_id_sample(event, &handle, &sample); - - perf_output_end(&handle); -out: - comm_event->event_id.header.size = size; -} - -static int perf_event_comm_match(struct perf_event *event) -{ - if (event->state < PERF_EVENT_STATE_INACTIVE) - return 0; - - if (!event_filter_match(event)) - return 0; - - if (event->attr.comm) - return 1; - - return 0; -} - -static void perf_event_comm_ctx(struct perf_event_context *ctx, - struct perf_comm_event *comm_event) -{ - struct perf_event *event; - - list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { - if (perf_event_comm_match(event)) - perf_event_comm_output(event, comm_event); - } -} - -static void perf_event_comm_event(struct perf_comm_event *comm_event) -{ - struct perf_cpu_context *cpuctx; - 
struct perf_event_context *ctx; - char comm[TASK_COMM_LEN]; - unsigned int size; - struct pmu *pmu; - int ctxn; - - memset(comm, 0, sizeof(comm)); - strlcpy(comm, comm_event->task->comm, sizeof(comm)); - size = ALIGN(strlen(comm)+1, sizeof(u64)); - - comm_event->comm = comm; - comm_event->comm_size = size; - - comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; - rcu_read_lock(); - list_for_each_entry_rcu(pmu, &pmus, entry) { - cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); - if (cpuctx->active_pmu != pmu) - goto next; - perf_event_comm_ctx(&cpuctx->ctx, comm_event); - - ctxn = pmu->task_ctx_nr; - if (ctxn < 0) - goto next; - - ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); - if (ctx) - perf_event_comm_ctx(ctx, comm_event); -next: - put_cpu_ptr(pmu->pmu_cpu_context); - } - rcu_read_unlock(); -} - -void perf_event_comm(struct task_struct *task) -{ - struct perf_comm_event comm_event; - struct perf_event_context *ctx; - int ctxn; - - for_each_task_context_nr(ctxn) { - ctx = task->perf_event_ctxp[ctxn]; - if (!ctx) - continue; - - perf_event_enable_on_exec(ctx); - } - - if (!atomic_read(&nr_comm_events)) - return; - - comm_event = (struct perf_comm_event){ - .task = task, - /* .comm */ - /* .comm_size */ - .event_id = { - .header = { - .type = PERF_RECORD_COMM, - .misc = 0, - /* .size */ - }, - /* .pid */ - /* .tid */ - }, - }; - - perf_event_comm_event(&comm_event); -} - -/* - * mmap tracking - */ - -struct perf_mmap_event { - struct vm_area_struct *vma; - - const char *file_name; - int file_size; - - struct { - struct perf_event_header header; - - u32 pid; - u32 tid; - u64 start; - u64 len; - u64 pgoff; - } event_id; -}; - -static void perf_event_mmap_output(struct perf_event *event, - struct perf_mmap_event *mmap_event) -{ - struct perf_output_handle handle; - struct perf_sample_data sample; - int size = mmap_event->event_id.header.size; - int ret; - - perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); - ret = perf_output_begin(&handle, event, - mmap_event->event_id.header.size, 0, 0); - if (ret) - goto out; - - mmap_event->event_id.pid = perf_event_pid(event, current); - mmap_event->event_id.tid = perf_event_tid(event, current); - - perf_output_put(&handle, mmap_event->event_id); - perf_output_copy(&handle, mmap_event->file_name, - mmap_event->file_size); - - perf_event__output_id_sample(event, &handle, &sample); - - perf_output_end(&handle); -out: - mmap_event->event_id.header.size = size; -} - -static int perf_event_mmap_match(struct perf_event *event, - struct perf_mmap_event *mmap_event, - int executable) -{ - if (event->state < PERF_EVENT_STATE_INACTIVE) - return 0; - - if (!event_filter_match(event)) - return 0; - - if ((!executable && event->attr.mmap_data) || - (executable && event->attr.mmap)) - return 1; - - return 0; -} - -static void perf_event_mmap_ctx(struct perf_event_context *ctx, - struct perf_mmap_event *mmap_event, - int executable) -{ - struct perf_event *event; - - list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { - if (perf_event_mmap_match(event, mmap_event, executable)) - perf_event_mmap_output(event, mmap_event); - } -} - -static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) -{ - struct perf_cpu_context *cpuctx; - struct perf_event_context *ctx; - struct vm_area_struct *vma = mmap_event->vma; - struct file *file = vma->vm_file; - unsigned int size; - char tmp[16]; - char *buf = NULL; - const char *name; - struct pmu *pmu; - int ctxn; - - memset(tmp, 0, sizeof(tmp)); - - if (file) { - /* 
- * d_path works from the end of the buffer backwards, so we - * need to add enough zero bytes after the string to handle - * the 64bit alignment we do later. - */ - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL); - if (!buf) { - name = strncpy(tmp, "//enomem", sizeof(tmp)); - goto got_name; - } - name = d_path(&file->f_path, buf, PATH_MAX); - if (IS_ERR(name)) { - name = strncpy(tmp, "//toolong", sizeof(tmp)); - goto got_name; - } - } else { - if (arch_vma_name(mmap_event->vma)) { - name = strncpy(tmp, arch_vma_name(mmap_event->vma), - sizeof(tmp)); - goto got_name; - } - - if (!vma->vm_mm) { - name = strncpy(tmp, "[vdso]", sizeof(tmp)); - goto got_name; - } else if (vma->vm_start <= vma->vm_mm->start_brk && - vma->vm_end >= vma->vm_mm->brk) { - name = strncpy(tmp, "[heap]", sizeof(tmp)); - goto got_name; - } else if (vma->vm_start <= vma->vm_mm->start_stack && - vma->vm_end >= vma->vm_mm->start_stack) { - name = strncpy(tmp, "[stack]", sizeof(tmp)); - goto got_name; - } - - name = strncpy(tmp, "//anon", sizeof(tmp)); - goto got_name; - } - -got_name: - size = ALIGN(strlen(name)+1, sizeof(u64)); - - mmap_event->file_name = name; - mmap_event->file_size = size; - - mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; - - rcu_read_lock(); - list_for_each_entry_rcu(pmu, &pmus, entry) { - cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); - if (cpuctx->active_pmu != pmu) - goto next; - perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, - vma->vm_flags & VM_EXEC); - - ctxn = pmu->task_ctx_nr; - if (ctxn < 0) - goto next; - - ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); - if (ctx) { - perf_event_mmap_ctx(ctx, mmap_event, - vma->vm_flags & VM_EXEC); - } -next: - put_cpu_ptr(pmu->pmu_cpu_context); - } - rcu_read_unlock(); - - kfree(buf); -} - -void perf_event_mmap(struct vm_area_struct *vma) -{ - struct perf_mmap_event mmap_event; - - if (!atomic_read(&nr_mmap_events)) - return; - - mmap_event = (struct perf_mmap_event){ - .vma = vma, - /* .file_name */ - /* .file_size */ - .event_id = { - .header = { - .type = PERF_RECORD_MMAP, - .misc = PERF_RECORD_MISC_USER, - /* .size */ - }, - /* .pid */ - /* .tid */ - .start = vma->vm_start, - .len = vma->vm_end - vma->vm_start, - .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, - }, - }; - - perf_event_mmap_event(&mmap_event); -} - -/* - * IRQ throttle logging - */ - -static void perf_log_throttle(struct perf_event *event, int enable) -{ - struct perf_output_handle handle; - struct perf_sample_data sample; - int ret; - - struct { - struct perf_event_header header; - u64 time; - u64 id; - u64 stream_id; - } throttle_event = { - .header = { - .type = PERF_RECORD_THROTTLE, - .misc = 0, - .size = sizeof(throttle_event), - }, - .time = perf_clock(), - .id = primary_event_id(event), - .stream_id = event->id, - }; - - if (enable) - throttle_event.header.type = PERF_RECORD_UNTHROTTLE; - - perf_event_header__init_id(&throttle_event.header, &sample, event); - - ret = perf_output_begin(&handle, event, - throttle_event.header.size, 1, 0); - if (ret) - return; - - perf_output_put(&handle, throttle_event); - perf_event__output_id_sample(event, &handle, &sample); - perf_output_end(&handle); -} - -/* - * Generic event overflow handling, sampling. 
- */ - -static int __perf_event_overflow(struct perf_event *event, int nmi, - int throttle, struct perf_sample_data *data, - struct pt_regs *regs) -{ - int events = atomic_read(&event->event_limit); - struct hw_perf_event *hwc = &event->hw; - int ret = 0; - - /* - * Non-sampling counters might still use the PMI to fold short - * hardware counters, ignore those. - */ - if (unlikely(!is_sampling_event(event))) - return 0; - - if (unlikely(hwc->interrupts >= max_samples_per_tick)) { - if (throttle) { - hwc->interrupts = MAX_INTERRUPTS; - perf_log_throttle(event, 0); - ret = 1; - } - } else - hwc->interrupts++; - - if (event->attr.freq) { - u64 now = perf_clock(); - s64 delta = now - hwc->freq_time_stamp; - - hwc->freq_time_stamp = now; - - if (delta > 0 && delta < 2*TICK_NSEC) - perf_adjust_period(event, delta, hwc->last_period); - } - - /* - * XXX event_limit might not quite work as expected on inherited - * events - */ - - event->pending_kill = POLL_IN; - if (events && atomic_dec_and_test(&event->event_limit)) { - ret = 1; - event->pending_kill = POLL_HUP; - if (nmi) { - event->pending_disable = 1; - irq_work_queue(&event->pending); - } else - perf_event_disable(event); - } - - if (event->overflow_handler) - event->overflow_handler(event, nmi, data, regs); - else - perf_event_output(event, nmi, data, regs); - - return ret; -} - -int perf_event_overflow(struct perf_event *event, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - return __perf_event_overflow(event, nmi, 1, data, regs); -} - -/* - * Generic software event infrastructure - */ - -struct swevent_htable { - struct swevent_hlist *swevent_hlist; - struct mutex hlist_mutex; - int hlist_refcount; - - /* Recursion avoidance in each contexts */ - int recursion[PERF_NR_CONTEXTS]; -}; - -static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); - -/* - * We directly increment event->count and keep a second value in - * event->hw.period_left to count intervals. This period event - * is kept in the range [-sample_period, 0] so that we can use the - * sign as trigger. - */ - -static u64 perf_swevent_set_period(struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - u64 period = hwc->last_period; - u64 nr, offset; - s64 old, val; - - hwc->last_period = hwc->sample_period; - -again: - old = val = local64_read(&hwc->period_left); - if (val < 0) - return 0; - - nr = div64_u64(period + val, period); - offset = nr * period; - val -= offset; - if (local64_cmpxchg(&hwc->period_left, old, val) != old) - goto again; - - return nr; -} - -static void perf_swevent_overflow(struct perf_event *event, u64 overflow, - int nmi, struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct hw_perf_event *hwc = &event->hw; - int throttle = 0; - - data->period = event->hw.last_period; - if (!overflow) - overflow = perf_swevent_set_period(event); - - if (hwc->interrupts == MAX_INTERRUPTS) - return; - - for (; overflow; overflow--) { - if (__perf_event_overflow(event, nmi, throttle, - data, regs)) { - /* - * We inhibit the overflow from happening when - * hwc->interrupts == MAX_INTERRUPTS. 
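perf_swevent_set_period() above keeps period_left counting up towards zero and, once it is non-negative, works out how many whole periods elapsed before dropping it back into [-period, 0). The same arithmetic in plain C, without the local64 cmpxchg loop; count_overflows() is an illustrative name:

#include <stdint.h>

/* val >= 0 means at least one period elapsed; nr is the number of full
 * periods, and period_left is pushed back into [-period, 0). */
static uint64_t count_overflows(int64_t *period_left, uint64_t period)
{
        int64_t val = *period_left;
        uint64_t nr;

        if (val < 0)
                return 0;               /* period not yet elapsed */

        nr = (period + (uint64_t)val) / period;
        *period_left = val - (int64_t)(nr * period);
        return nr;
}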
- */ - break; - } - throttle = 1; - } -} - -static void perf_swevent_event(struct perf_event *event, u64 nr, - int nmi, struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct hw_perf_event *hwc = &event->hw; - - local64_add(nr, &event->count); - - if (!regs) - return; - - if (!is_sampling_event(event)) - return; - - if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) - return perf_swevent_overflow(event, 1, nmi, data, regs); - - if (local64_add_negative(nr, &hwc->period_left)) - return; - - perf_swevent_overflow(event, 0, nmi, data, regs); -} - -static int perf_exclude_event(struct perf_event *event, - struct pt_regs *regs) -{ - if (event->hw.state & PERF_HES_STOPPED) - return 1; - - if (regs) { - if (event->attr.exclude_user && user_mode(regs)) - return 1; - - if (event->attr.exclude_kernel && !user_mode(regs)) - return 1; - } - - return 0; -} - -static int perf_swevent_match(struct perf_event *event, - enum perf_type_id type, - u32 event_id, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - if (event->attr.type != type) - return 0; - - if (event->attr.config != event_id) - return 0; - - if (perf_exclude_event(event, regs)) - return 0; - - return 1; -} - -static inline u64 swevent_hash(u64 type, u32 event_id) -{ - u64 val = event_id | (type << 32); - - return hash_64(val, SWEVENT_HLIST_BITS); -} - -static inline struct hlist_head * -__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) -{ - u64 hash = swevent_hash(type, event_id); - - return &hlist->heads[hash]; -} - -/* For the read side: events when they trigger */ -static inline struct hlist_head * -find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) -{ - struct swevent_hlist *hlist; - - hlist = rcu_dereference(swhash->swevent_hlist); - if (!hlist) - return NULL; - - return __find_swevent_head(hlist, type, event_id); -} - -/* For the event head insertion and removal in the hlist */ -static inline struct hlist_head * -find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) -{ - struct swevent_hlist *hlist; - u32 event_id = event->attr.config; - u64 type = event->attr.type; - - /* - * Event scheduling is always serialized against hlist allocation - * and release. Which makes the protected version suitable here. - * The context lock guarantees that. 
- */ - hlist = rcu_dereference_protected(swhash->swevent_hlist, - lockdep_is_held(&event->ctx->lock)); - if (!hlist) - return NULL; - - return __find_swevent_head(hlist, type, event_id); -} - -static void do_perf_sw_event(enum perf_type_id type, u32 event_id, - u64 nr, int nmi, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); - struct perf_event *event; - struct hlist_node *node; - struct hlist_head *head; - - rcu_read_lock(); - head = find_swevent_head_rcu(swhash, type, event_id); - if (!head) - goto end; - - hlist_for_each_entry_rcu(event, node, head, hlist_entry) { - if (perf_swevent_match(event, type, event_id, data, regs)) - perf_swevent_event(event, nr, nmi, data, regs); - } -end: - rcu_read_unlock(); -} - -int perf_swevent_get_recursion_context(void) -{ - struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); - - return get_recursion_context(swhash->recursion); -} -EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); - -inline void perf_swevent_put_recursion_context(int rctx) -{ - struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); - - put_recursion_context(swhash->recursion, rctx); -} - -void __perf_sw_event(u32 event_id, u64 nr, int nmi, - struct pt_regs *regs, u64 addr) -{ - struct perf_sample_data data; - int rctx; - - preempt_disable_notrace(); - rctx = perf_swevent_get_recursion_context(); - if (rctx < 0) - return; - - perf_sample_data_init(&data, addr); - - do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); - - perf_swevent_put_recursion_context(rctx); - preempt_enable_notrace(); -} - -static void perf_swevent_read(struct perf_event *event) -{ -} - -static int perf_swevent_add(struct perf_event *event, int flags) -{ - struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); - struct hw_perf_event *hwc = &event->hw; - struct hlist_head *head; - - if (is_sampling_event(event)) { - hwc->last_period = hwc->sample_period; - perf_swevent_set_period(event); - } - - hwc->state = !(flags & PERF_EF_START); - - head = find_swevent_head(swhash, event); - if (WARN_ON_ONCE(!head)) - return -EINVAL; - - hlist_add_head_rcu(&event->hlist_entry, head); - - return 0; -} - -static void perf_swevent_del(struct perf_event *event, int flags) -{ - hlist_del_rcu(&event->hlist_entry); -} - -static void perf_swevent_start(struct perf_event *event, int flags) -{ - event->hw.state = 0; -} - -static void perf_swevent_stop(struct perf_event *event, int flags) -{ - event->hw.state = PERF_HES_STOPPED; -} - -/* Deref the hlist from the update side */ -static inline struct swevent_hlist * -swevent_hlist_deref(struct swevent_htable *swhash) -{ - return rcu_dereference_protected(swhash->swevent_hlist, - lockdep_is_held(&swhash->hlist_mutex)); -} - -static void swevent_hlist_release_rcu(struct rcu_head *rcu_head) -{ - struct swevent_hlist *hlist; - - hlist = container_of(rcu_head, struct swevent_hlist, rcu_head); - kfree(hlist); -} - -static void swevent_hlist_release(struct swevent_htable *swhash) -{ - struct swevent_hlist *hlist = swevent_hlist_deref(swhash); - - if (!hlist) - return; - - rcu_assign_pointer(swhash->swevent_hlist, NULL); - call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu); -} - -static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) -{ - struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); - - mutex_lock(&swhash->hlist_mutex); - - if (!--swhash->hlist_refcount) - swevent_hlist_release(swhash); - - mutex_unlock(&swhash->hlist_mutex); -} - 
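The hashed lookup used by swevent_hash()/find_swevent_head() above packs the event type and config into one 64-bit key and keeps only the top bits of a multiplicative hash. A stand-alone sketch; the bucket count and the multiplier are stand-ins for SWEVENT_HLIST_BITS and the kernel's hash_64() golden-ratio prime, not values taken from this patch:

#include <stdint.h>

#define BUCKET_BITS 8                   /* illustrative bucket count: 256 */

/* Fold (type, event_id) into one bucket index the same way
 * swevent_hash() does: pack both into a u64, multiply, keep top bits. */
static unsigned int swevent_bucket(uint64_t type, uint32_t event_id)
{
        uint64_t val = (uint64_t)event_id | (type << 32);

        return (unsigned int)((val * 0x9e3779b97f4a7c15ULL) >>
                              (64 - BUCKET_BITS));
}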
-static void swevent_hlist_put(struct perf_event *event) -{ - int cpu; - - if (event->cpu != -1) { - swevent_hlist_put_cpu(event, event->cpu); - return; - } - - for_each_possible_cpu(cpu) - swevent_hlist_put_cpu(event, cpu); -} - -static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) -{ - struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); - int err = 0; - - mutex_lock(&swhash->hlist_mutex); - - if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { - struct swevent_hlist *hlist; - - hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); - if (!hlist) { - err = -ENOMEM; - goto exit; - } - rcu_assign_pointer(swhash->swevent_hlist, hlist); - } - swhash->hlist_refcount++; -exit: - mutex_unlock(&swhash->hlist_mutex); - - return err; -} - -static int swevent_hlist_get(struct perf_event *event) -{ - int err; - int cpu, failed_cpu; - - if (event->cpu != -1) - return swevent_hlist_get_cpu(event, event->cpu); - - get_online_cpus(); - for_each_possible_cpu(cpu) { - err = swevent_hlist_get_cpu(event, cpu); - if (err) { - failed_cpu = cpu; - goto fail; - } - } - put_online_cpus(); - - return 0; -fail: - for_each_possible_cpu(cpu) { - if (cpu == failed_cpu) - break; - swevent_hlist_put_cpu(event, cpu); - } - - put_online_cpus(); - return err; -} - -struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; - -static void sw_perf_event_destroy(struct perf_event *event) -{ - u64 event_id = event->attr.config; - - WARN_ON(event->parent); - - jump_label_dec(&perf_swevent_enabled[event_id]); - swevent_hlist_put(event); -} - -static int perf_swevent_init(struct perf_event *event) -{ - int event_id = event->attr.config; - - if (event->attr.type != PERF_TYPE_SOFTWARE) - return -ENOENT; - - switch (event_id) { - case PERF_COUNT_SW_CPU_CLOCK: - case PERF_COUNT_SW_TASK_CLOCK: - return -ENOENT; - - default: - break; - } - - if (event_id >= PERF_COUNT_SW_MAX) - return -ENOENT; - - if (!event->parent) { - int err; - - err = swevent_hlist_get(event); - if (err) - return err; - - jump_label_inc(&perf_swevent_enabled[event_id]); - event->destroy = sw_perf_event_destroy; - } - - return 0; -} - -static struct pmu perf_swevent = { - .task_ctx_nr = perf_sw_context, - - .event_init = perf_swevent_init, - .add = perf_swevent_add, - .del = perf_swevent_del, - .start = perf_swevent_start, - .stop = perf_swevent_stop, - .read = perf_swevent_read, -}; - -#ifdef CONFIG_EVENT_TRACING - -static int perf_tp_filter_match(struct perf_event *event, - struct perf_sample_data *data) -{ - void *record = data->raw->data; - - if (likely(!event->filter) || filter_match_preds(event->filter, record)) - return 1; - return 0; -} - -static int perf_tp_event_match(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - if (event->hw.state & PERF_HES_STOPPED) - return 0; - /* - * All tracepoints are from kernel-space. 
- */ - if (event->attr.exclude_kernel) - return 0; - - if (!perf_tp_filter_match(event, data)) - return 0; - - return 1; -} - -void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, - struct pt_regs *regs, struct hlist_head *head, int rctx) -{ - struct perf_sample_data data; - struct perf_event *event; - struct hlist_node *node; - - struct perf_raw_record raw = { - .size = entry_size, - .data = record, - }; - - perf_sample_data_init(&data, addr); - data.raw = &raw; - - hlist_for_each_entry_rcu(event, node, head, hlist_entry) { - if (perf_tp_event_match(event, &data, regs)) - perf_swevent_event(event, count, 1, &data, regs); - } - - perf_swevent_put_recursion_context(rctx); -} -EXPORT_SYMBOL_GPL(perf_tp_event); - -static void tp_perf_event_destroy(struct perf_event *event) -{ - perf_trace_destroy(event); -} - -static int perf_tp_event_init(struct perf_event *event) -{ - int err; - - if (event->attr.type != PERF_TYPE_TRACEPOINT) - return -ENOENT; - - err = perf_trace_init(event); - if (err) - return err; - - event->destroy = tp_perf_event_destroy; - - return 0; -} - -static struct pmu perf_tracepoint = { - .task_ctx_nr = perf_sw_context, - - .event_init = perf_tp_event_init, - .add = perf_trace_add, - .del = perf_trace_del, - .start = perf_swevent_start, - .stop = perf_swevent_stop, - .read = perf_swevent_read, -}; - -static inline void perf_tp_register(void) -{ - perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); -} - -static int perf_event_set_filter(struct perf_event *event, void __user *arg) -{ - char *filter_str; - int ret; - - if (event->attr.type != PERF_TYPE_TRACEPOINT) - return -EINVAL; - - filter_str = strndup_user(arg, PAGE_SIZE); - if (IS_ERR(filter_str)) - return PTR_ERR(filter_str); - - ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); - - kfree(filter_str); - return ret; -} - -static void perf_event_free_filter(struct perf_event *event) -{ - ftrace_profile_free_filter(event); -} - -#else - -static inline void perf_tp_register(void) -{ -} - -static int perf_event_set_filter(struct perf_event *event, void __user *arg) -{ - return -ENOENT; -} - -static void perf_event_free_filter(struct perf_event *event) -{ -} - -#endif /* CONFIG_EVENT_TRACING */ - -#ifdef CONFIG_HAVE_HW_BREAKPOINT -void perf_bp_event(struct perf_event *bp, void *data) -{ - struct perf_sample_data sample; - struct pt_regs *regs = data; - - perf_sample_data_init(&sample, bp->attr.bp_addr); - - if (!bp->hw.state && !perf_exclude_event(bp, regs)) - perf_swevent_event(bp, 1, 1, &sample, regs); -} -#endif - -/* - * hrtimer based swevent callback - */ - -static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) -{ - enum hrtimer_restart ret = HRTIMER_RESTART; - struct perf_sample_data data; - struct pt_regs *regs; - struct perf_event *event; - u64 period; - - event = container_of(hrtimer, struct perf_event, hw.hrtimer); - - if (event->state != PERF_EVENT_STATE_ACTIVE) - return HRTIMER_NORESTART; - - event->pmu->read(event); - - perf_sample_data_init(&data, 0); - data.period = event->hw.last_period; - regs = get_irq_regs(); - - if (regs && !perf_exclude_event(event, regs)) { - if (!(event->attr.exclude_idle && current->pid == 0)) - if (perf_event_overflow(event, 0, &data, regs)) - ret = HRTIMER_NORESTART; - } - - period = max_t(u64, 10000, event->hw.sample_period); - hrtimer_forward_now(hrtimer, ns_to_ktime(period)); - - return ret; -} - -static void perf_swevent_start_hrtimer(struct perf_event *event) -{ - struct hw_perf_event *hwc = 
&event->hw; - s64 period; - - if (!is_sampling_event(event)) - return; - - period = local64_read(&hwc->period_left); - if (period) { - if (period < 0) - period = 10000; - - local64_set(&hwc->period_left, 0); - } else { - period = max_t(u64, 10000, hwc->sample_period); - } - __hrtimer_start_range_ns(&hwc->hrtimer, - ns_to_ktime(period), 0, - HRTIMER_MODE_REL_PINNED, 0); -} - -static void perf_swevent_cancel_hrtimer(struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - if (is_sampling_event(event)) { - ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); - local64_set(&hwc->period_left, ktime_to_ns(remaining)); - - hrtimer_cancel(&hwc->hrtimer); - } -} - -static void perf_swevent_init_hrtimer(struct perf_event *event) -{ - struct hw_perf_event *hwc = &event->hw; - - if (!is_sampling_event(event)) - return; - - hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hwc->hrtimer.function = perf_swevent_hrtimer; - - /* - * Since hrtimers have a fixed rate, we can do a static freq->period - * mapping and avoid the whole period adjust feedback stuff. - */ - if (event->attr.freq) { - long freq = event->attr.sample_freq; - - event->attr.sample_period = NSEC_PER_SEC / freq; - hwc->sample_period = event->attr.sample_period; - local64_set(&hwc->period_left, hwc->sample_period); - event->attr.freq = 0; - } -} - -/* - * Software event: cpu wall time clock - */ - -static void cpu_clock_event_update(struct perf_event *event) -{ - s64 prev; - u64 now; - - now = local_clock(); - prev = local64_xchg(&event->hw.prev_count, now); - local64_add(now - prev, &event->count); -} - -static void cpu_clock_event_start(struct perf_event *event, int flags) -{ - local64_set(&event->hw.prev_count, local_clock()); - perf_swevent_start_hrtimer(event); -} - -static void cpu_clock_event_stop(struct perf_event *event, int flags) -{ - perf_swevent_cancel_hrtimer(event); - cpu_clock_event_update(event); -} - -static int cpu_clock_event_add(struct perf_event *event, int flags) -{ - if (flags & PERF_EF_START) - cpu_clock_event_start(event, flags); - - return 0; -} - -static void cpu_clock_event_del(struct perf_event *event, int flags) -{ - cpu_clock_event_stop(event, flags); -} - -static void cpu_clock_event_read(struct perf_event *event) -{ - cpu_clock_event_update(event); -} - -static int cpu_clock_event_init(struct perf_event *event) -{ - if (event->attr.type != PERF_TYPE_SOFTWARE) - return -ENOENT; - - if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) - return -ENOENT; - - perf_swevent_init_hrtimer(event); - - return 0; -} - -static struct pmu perf_cpu_clock = { - .task_ctx_nr = perf_sw_context, - - .event_init = cpu_clock_event_init, - .add = cpu_clock_event_add, - .del = cpu_clock_event_del, - .start = cpu_clock_event_start, - .stop = cpu_clock_event_stop, - .read = cpu_clock_event_read, -}; - -/* - * Software event: task time clock - */ - -static void task_clock_event_update(struct perf_event *event, u64 now) -{ - u64 prev; - s64 delta; - - prev = local64_xchg(&event->hw.prev_count, now); - delta = now - prev; - local64_add(delta, &event->count); -} - -static void task_clock_event_start(struct perf_event *event, int flags) -{ - local64_set(&event->hw.prev_count, event->ctx->time); - perf_swevent_start_hrtimer(event); -} - -static void task_clock_event_stop(struct perf_event *event, int flags) -{ - perf_swevent_cancel_hrtimer(event); - task_clock_event_update(event, event->ctx->time); -} - -static int task_clock_event_add(struct perf_event *event, int flags) -{ - if (flags & 
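Because hrtimers fire at a fixed rate, perf_swevent_init_hrtimer() above converts a requested sample_freq into a period once instead of using the adaptive feedback path, and perf_swevent_hrtimer() never re-arms the timer shorter than 10us. The conversion in isolation; sample_freq and the clamp placement here are an illustration, not copied code:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint64_t sample_freq = 4000;    /* illustrative request, in Hz */
        uint64_t period = NSEC_PER_SEC / sample_freq;

        if (period < 10000)             /* the max_t(u64, 10000, ...) clamp */
                period = 10000;

        printf("hrtimer period: %llu ns\n", (unsigned long long)period);
        return 0;
}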
PERF_EF_START) - task_clock_event_start(event, flags); - - return 0; -} - -static void task_clock_event_del(struct perf_event *event, int flags) -{ - task_clock_event_stop(event, PERF_EF_UPDATE); -} - -static void task_clock_event_read(struct perf_event *event) -{ - u64 now = perf_clock(); - u64 delta = now - event->ctx->timestamp; - u64 time = event->ctx->time + delta; - - task_clock_event_update(event, time); -} - -static int task_clock_event_init(struct perf_event *event) -{ - if (event->attr.type != PERF_TYPE_SOFTWARE) - return -ENOENT; - - if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) - return -ENOENT; - - perf_swevent_init_hrtimer(event); - - return 0; -} - -static struct pmu perf_task_clock = { - .task_ctx_nr = perf_sw_context, - - .event_init = task_clock_event_init, - .add = task_clock_event_add, - .del = task_clock_event_del, - .start = task_clock_event_start, - .stop = task_clock_event_stop, - .read = task_clock_event_read, -}; - -static void perf_pmu_nop_void(struct pmu *pmu) -{ -} - -static int perf_pmu_nop_int(struct pmu *pmu) -{ - return 0; -} - -static void perf_pmu_start_txn(struct pmu *pmu) -{ - perf_pmu_disable(pmu); -} - -static int perf_pmu_commit_txn(struct pmu *pmu) -{ - perf_pmu_enable(pmu); - return 0; -} - -static void perf_pmu_cancel_txn(struct pmu *pmu) -{ - perf_pmu_enable(pmu); -} - -/* - * Ensures all contexts with the same task_ctx_nr have the same - * pmu_cpu_context too. - */ -static void *find_pmu_context(int ctxn) -{ - struct pmu *pmu; - - if (ctxn < 0) - return NULL; - - list_for_each_entry(pmu, &pmus, entry) { - if (pmu->task_ctx_nr == ctxn) - return pmu->pmu_cpu_context; - } - - return NULL; -} - -static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) -{ - int cpu; - - for_each_possible_cpu(cpu) { - struct perf_cpu_context *cpuctx; - - cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); - - if (cpuctx->active_pmu == old_pmu) - cpuctx->active_pmu = pmu; - } -} - -static void free_pmu_context(struct pmu *pmu) -{ - struct pmu *i; - - mutex_lock(&pmus_lock); - /* - * Like a real lame refcount. 
- */ - list_for_each_entry(i, &pmus, entry) { - if (i->pmu_cpu_context == pmu->pmu_cpu_context) { - update_pmu_context(i, pmu); - goto out; - } - } - - free_percpu(pmu->pmu_cpu_context); -out: - mutex_unlock(&pmus_lock); -} -static struct idr pmu_idr; - -static ssize_t -type_show(struct device *dev, struct device_attribute *attr, char *page) -{ - struct pmu *pmu = dev_get_drvdata(dev); - - return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); -} - -static struct device_attribute pmu_dev_attrs[] = { - __ATTR_RO(type), - __ATTR_NULL, -}; - -static int pmu_bus_running; -static struct bus_type pmu_bus = { - .name = "event_source", - .dev_attrs = pmu_dev_attrs, -}; - -static void pmu_dev_release(struct device *dev) -{ - kfree(dev); -} - -static int pmu_dev_alloc(struct pmu *pmu) -{ - int ret = -ENOMEM; - - pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); - if (!pmu->dev) - goto out; - - device_initialize(pmu->dev); - ret = dev_set_name(pmu->dev, "%s", pmu->name); - if (ret) - goto free_dev; - - dev_set_drvdata(pmu->dev, pmu); - pmu->dev->bus = &pmu_bus; - pmu->dev->release = pmu_dev_release; - ret = device_add(pmu->dev); - if (ret) - goto free_dev; - -out: - return ret; - -free_dev: - put_device(pmu->dev); - goto out; -} - -static struct lock_class_key cpuctx_mutex; - -int perf_pmu_register(struct pmu *pmu, char *name, int type) -{ - int cpu, ret; - - mutex_lock(&pmus_lock); - ret = -ENOMEM; - pmu->pmu_disable_count = alloc_percpu(int); - if (!pmu->pmu_disable_count) - goto unlock; - - pmu->type = -1; - if (!name) - goto skip_type; - pmu->name = name; - - if (type < 0) { - int err = idr_pre_get(&pmu_idr, GFP_KERNEL); - if (!err) - goto free_pdc; - - err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type); - if (err) { - ret = err; - goto free_pdc; - } - } - pmu->type = type; - - if (pmu_bus_running) { - ret = pmu_dev_alloc(pmu); - if (ret) - goto free_idr; - } - -skip_type: - pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); - if (pmu->pmu_cpu_context) - goto got_cpu_context; - - pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); - if (!pmu->pmu_cpu_context) - goto free_dev; - - for_each_possible_cpu(cpu) { - struct perf_cpu_context *cpuctx; - - cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); - __perf_event_init_context(&cpuctx->ctx); - lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); - cpuctx->ctx.type = cpu_context; - cpuctx->ctx.pmu = pmu; - cpuctx->jiffies_interval = 1; - INIT_LIST_HEAD(&cpuctx->rotation_list); - cpuctx->active_pmu = pmu; - } - -got_cpu_context: - if (!pmu->start_txn) { - if (pmu->pmu_enable) { - /* - * If we have pmu_enable/pmu_disable calls, install - * transaction stubs that use that to try and batch - * hardware accesses. 
- */ - pmu->start_txn = perf_pmu_start_txn; - pmu->commit_txn = perf_pmu_commit_txn; - pmu->cancel_txn = perf_pmu_cancel_txn; - } else { - pmu->start_txn = perf_pmu_nop_void; - pmu->commit_txn = perf_pmu_nop_int; - pmu->cancel_txn = perf_pmu_nop_void; - } - } - - if (!pmu->pmu_enable) { - pmu->pmu_enable = perf_pmu_nop_void; - pmu->pmu_disable = perf_pmu_nop_void; - } - - list_add_rcu(&pmu->entry, &pmus); - ret = 0; -unlock: - mutex_unlock(&pmus_lock); - - return ret; - -free_dev: - device_del(pmu->dev); - put_device(pmu->dev); - -free_idr: - if (pmu->type >= PERF_TYPE_MAX) - idr_remove(&pmu_idr, pmu->type); - -free_pdc: - free_percpu(pmu->pmu_disable_count); - goto unlock; -} - -void perf_pmu_unregister(struct pmu *pmu) -{ - mutex_lock(&pmus_lock); - list_del_rcu(&pmu->entry); - mutex_unlock(&pmus_lock); - - /* - * We dereference the pmu list under both SRCU and regular RCU, so - * synchronize against both of those. - */ - synchronize_srcu(&pmus_srcu); - synchronize_rcu(); - - free_percpu(pmu->pmu_disable_count); - if (pmu->type >= PERF_TYPE_MAX) - idr_remove(&pmu_idr, pmu->type); - device_del(pmu->dev); - put_device(pmu->dev); - free_pmu_context(pmu); -} - -struct pmu *perf_init_event(struct perf_event *event) -{ - struct pmu *pmu = NULL; - int idx; - int ret; - - idx = srcu_read_lock(&pmus_srcu); - - rcu_read_lock(); - pmu = idr_find(&pmu_idr, event->attr.type); - rcu_read_unlock(); - if (pmu) { - ret = pmu->event_init(event); - if (ret) - pmu = ERR_PTR(ret); - goto unlock; - } - - list_for_each_entry_rcu(pmu, &pmus, entry) { - ret = pmu->event_init(event); - if (!ret) - goto unlock; - - if (ret != -ENOENT) { - pmu = ERR_PTR(ret); - goto unlock; - } - } - pmu = ERR_PTR(-ENOENT); -unlock: - srcu_read_unlock(&pmus_srcu, idx); - - return pmu; -} - -/* - * Allocate and initialize a event structure - */ -static struct perf_event * -perf_event_alloc(struct perf_event_attr *attr, int cpu, - struct task_struct *task, - struct perf_event *group_leader, - struct perf_event *parent_event, - perf_overflow_handler_t overflow_handler) -{ - struct pmu *pmu; - struct perf_event *event; - struct hw_perf_event *hwc; - long err; - - if ((unsigned)cpu >= nr_cpu_ids) { - if (!task || cpu != -1) - return ERR_PTR(-EINVAL); - } - - event = kzalloc(sizeof(*event), GFP_KERNEL); - if (!event) - return ERR_PTR(-ENOMEM); - - /* - * Single events are their own group leaders, with an - * empty sibling list: - */ - if (!group_leader) - group_leader = event; - - mutex_init(&event->child_mutex); - INIT_LIST_HEAD(&event->child_list); - - INIT_LIST_HEAD(&event->group_entry); - INIT_LIST_HEAD(&event->event_entry); - INIT_LIST_HEAD(&event->sibling_list); - init_waitqueue_head(&event->waitq); - init_irq_work(&event->pending, perf_pending_event); - - mutex_init(&event->mmap_mutex); - - event->cpu = cpu; - event->attr = *attr; - event->group_leader = group_leader; - event->pmu = NULL; - event->oncpu = -1; - - event->parent = parent_event; - - event->ns = get_pid_ns(current->nsproxy->pid_ns); - event->id = atomic64_inc_return(&perf_event_id); - - event->state = PERF_EVENT_STATE_INACTIVE; - - if (task) { - event->attach_state = PERF_ATTACH_TASK; -#ifdef CONFIG_HAVE_HW_BREAKPOINT - /* - * hw_breakpoint is a bit difficult here.. 
- */ - if (attr->type == PERF_TYPE_BREAKPOINT) - event->hw.bp_target = task; -#endif - } - - if (!overflow_handler && parent_event) - overflow_handler = parent_event->overflow_handler; - - event->overflow_handler = overflow_handler; - - if (attr->disabled) - event->state = PERF_EVENT_STATE_OFF; - - pmu = NULL; - - hwc = &event->hw; - hwc->sample_period = attr->sample_period; - if (attr->freq && attr->sample_freq) - hwc->sample_period = 1; - hwc->last_period = hwc->sample_period; - - local64_set(&hwc->period_left, hwc->sample_period); - - /* - * we currently do not support PERF_FORMAT_GROUP on inherited events - */ - if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) - goto done; - - pmu = perf_init_event(event); - -done: - err = 0; - if (!pmu) - err = -EINVAL; - else if (IS_ERR(pmu)) - err = PTR_ERR(pmu); - - if (err) { - if (event->ns) - put_pid_ns(event->ns); - kfree(event); - return ERR_PTR(err); - } - - event->pmu = pmu; - - if (!event->parent) { - if (event->attach_state & PERF_ATTACH_TASK) - jump_label_inc(&perf_sched_events); - if (event->attr.mmap || event->attr.mmap_data) - atomic_inc(&nr_mmap_events); - if (event->attr.comm) - atomic_inc(&nr_comm_events); - if (event->attr.task) - atomic_inc(&nr_task_events); - if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { - err = get_callchain_buffers(); - if (err) { - free_event(event); - return ERR_PTR(err); - } - } - } - - return event; -} - -static int perf_copy_attr(struct perf_event_attr __user *uattr, - struct perf_event_attr *attr) -{ - u32 size; - int ret; - - if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) - return -EFAULT; - - /* - * zero the full structure, so that a short copy will be nice. - */ - memset(attr, 0, sizeof(*attr)); - - ret = get_user(size, &uattr->size); - if (ret) - return ret; - - if (size > PAGE_SIZE) /* silly large */ - goto err_size; - - if (!size) /* abi compat */ - size = PERF_ATTR_SIZE_VER0; - - if (size < PERF_ATTR_SIZE_VER0) - goto err_size; - - /* - * If we're handed a bigger struct than we know of, - * ensure all the unknown bits are 0 - i.e. new - * user-space does not rely on any kernel feature - * extensions we dont know about yet. - */ - if (size > sizeof(*attr)) { - unsigned char __user *addr; - unsigned char __user *end; - unsigned char val; - - addr = (void __user *)uattr + sizeof(*attr); - end = (void __user *)uattr + size; - - for (; addr < end; addr++) { - ret = get_user(val, addr); - if (ret) - return ret; - if (val) - goto err_size; - } - size = sizeof(*attr); - } - - ret = copy_from_user(attr, uattr, size); - if (ret) - return -EFAULT; - - /* - * If the type exists, the corresponding creation will verify - * the attr->config. - */ - if (attr->type >= PERF_TYPE_MAX) - return -EINVAL; - - if (attr->__reserved_1) - return -EINVAL; - - if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) - return -EINVAL; - - if (attr->read_format & ~(PERF_FORMAT_MAX-1)) - return -EINVAL; - -out: - return ret; - -err_size: - put_user(sizeof(*attr), &uattr->size); - ret = -E2BIG; - goto out; -} - -static int -perf_event_set_output(struct perf_event *event, struct perf_event *output_event) -{ - struct perf_buffer *buffer = NULL, *old_buffer = NULL; - int ret = -EINVAL; - - if (!output_event) - goto set; - - /* don't allow circular references */ - if (event == output_event) - goto out; - - /* - * Don't allow cross-cpu buffers - */ - if (output_event->cpu != event->cpu) - goto out; - - /* - * If its not a per-cpu buffer, it must be the same task. 
- */ - if (output_event->cpu == -1 && output_event->ctx != event->ctx) - goto out; - -set: - mutex_lock(&event->mmap_mutex); - /* Can't redirect output if we've got an active mmap() */ - if (atomic_read(&event->mmap_count)) - goto unlock; - - if (output_event) { - /* get the buffer we want to redirect to */ - buffer = perf_buffer_get(output_event); - if (!buffer) - goto unlock; - } - - old_buffer = event->buffer; - rcu_assign_pointer(event->buffer, buffer); - ret = 0; -unlock: - mutex_unlock(&event->mmap_mutex); - - if (old_buffer) - perf_buffer_put(old_buffer); -out: - return ret; -} - -/** - * sys_perf_event_open - open a performance event, associate it to a task/cpu - * - * @attr_uptr: event_id type attributes for monitoring/sampling - * @pid: target pid - * @cpu: target cpu - * @group_fd: group leader event fd - */ -SYSCALL_DEFINE5(perf_event_open, - struct perf_event_attr __user *, attr_uptr, - pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) -{ - struct perf_event *group_leader = NULL, *output_event = NULL; - struct perf_event *event, *sibling; - struct perf_event_attr attr; - struct perf_event_context *ctx; - struct file *event_file = NULL; - struct file *group_file = NULL; - struct task_struct *task = NULL; - struct pmu *pmu; - int event_fd; - int move_group = 0; - int fput_needed = 0; - int err; - - /* for future expandability... */ - if (flags & ~PERF_FLAG_ALL) - return -EINVAL; - - err = perf_copy_attr(attr_uptr, &attr); - if (err) - return err; - - if (!attr.exclude_kernel) { - if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) - return -EACCES; - } - - if (attr.freq) { - if (attr.sample_freq > sysctl_perf_event_sample_rate) - return -EINVAL; - } - - /* - * In cgroup mode, the pid argument is used to pass the fd - * opened to the cgroup directory in cgroupfs. The cpu argument - * designates the cpu on which to monitor threads from that - * cgroup. - */ - if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) - return -EINVAL; - - event_fd = get_unused_fd_flags(O_RDWR); - if (event_fd < 0) - return event_fd; - - if (group_fd != -1) { - group_leader = perf_fget_light(group_fd, &fput_needed); - if (IS_ERR(group_leader)) { - err = PTR_ERR(group_leader); - goto err_fd; - } - group_file = group_leader->filp; - if (flags & PERF_FLAG_FD_OUTPUT) - output_event = group_leader; - if (flags & PERF_FLAG_FD_NO_GROUP) - group_leader = NULL; - } - - if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { - task = find_lively_task_by_vpid(pid); - if (IS_ERR(task)) { - err = PTR_ERR(task); - goto err_group_fd; - } - } - - event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL); - if (IS_ERR(event)) { - err = PTR_ERR(event); - goto err_task; - } - - if (flags & PERF_FLAG_PID_CGROUP) { - err = perf_cgroup_connect(pid, event, &attr, group_leader); - if (err) - goto err_alloc; - /* - * one more event: - * - that has cgroup constraint on event->cpu - * - that may need work on context switch - */ - atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); - jump_label_inc(&perf_sched_events); - } - - /* - * Special case software events and allow them to be part of - * any hardware group. - */ - pmu = event->pmu; - - if (group_leader && - (is_software_event(event) != is_software_event(group_leader))) { - if (is_software_event(event)) { - /* - * If event and group_leader are not both a software - * event, and event is, then group leader is not. 
- * - * Allow the addition of software events to !software - * groups, this is safe because software events never - * fail to schedule. - */ - pmu = group_leader->pmu; - } else if (is_software_event(group_leader) && - (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { - /* - * In case the group is a pure software group, and we - * try to add a hardware event, move the whole group to - * the hardware context. - */ - move_group = 1; - } - } - - /* - * Get the target context (task or percpu): - */ - ctx = find_get_context(pmu, task, cpu); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto err_alloc; - } - - if (task) { - put_task_struct(task); - task = NULL; - } - - /* - * Look up the group leader (we will attach this event to it): - */ - if (group_leader) { - err = -EINVAL; - - /* - * Do not allow a recursive hierarchy (this new sibling - * becoming part of another group-sibling): - */ - if (group_leader->group_leader != group_leader) - goto err_context; - /* - * Do not allow to attach to a group in a different - * task or CPU context: - */ - if (move_group) { - if (group_leader->ctx->type != ctx->type) - goto err_context; - } else { - if (group_leader->ctx != ctx) - goto err_context; - } - - /* - * Only a group leader can be exclusive or pinned - */ - if (attr.exclusive || attr.pinned) - goto err_context; - } - - if (output_event) { - err = perf_event_set_output(event, output_event); - if (err) - goto err_context; - } - - event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); - if (IS_ERR(event_file)) { - err = PTR_ERR(event_file); - goto err_context; - } - - if (move_group) { - struct perf_event_context *gctx = group_leader->ctx; - - mutex_lock(&gctx->mutex); - perf_remove_from_context(group_leader); - list_for_each_entry(sibling, &group_leader->sibling_list, - group_entry) { - perf_remove_from_context(sibling); - put_ctx(gctx); - } - mutex_unlock(&gctx->mutex); - put_ctx(gctx); - } - - event->filp = event_file; - WARN_ON_ONCE(ctx->parent_ctx); - mutex_lock(&ctx->mutex); - - if (move_group) { - perf_install_in_context(ctx, group_leader, cpu); - get_ctx(ctx); - list_for_each_entry(sibling, &group_leader->sibling_list, - group_entry) { - perf_install_in_context(ctx, sibling, cpu); - get_ctx(ctx); - } - } - - perf_install_in_context(ctx, event, cpu); - ++ctx->generation; - perf_unpin_context(ctx); - mutex_unlock(&ctx->mutex); - - event->owner = current; - - mutex_lock(&current->perf_event_mutex); - list_add_tail(&event->owner_entry, &current->perf_event_list); - mutex_unlock(&current->perf_event_mutex); - - /* - * Precalculate sample_data sizes - */ - perf_event__header_size(event); - perf_event__id_header_size(event); - - /* - * Drop the reference on the group_event after placing the - * new event on the sibling_list. This ensures destruction - * of the group leader will find the pointer to itself in - * perf_group_detach(). 
- */ - fput_light(group_file, fput_needed); - fd_install(event_fd, event_file); - return event_fd; - -err_context: - perf_unpin_context(ctx); - put_ctx(ctx); -err_alloc: - free_event(event); -err_task: - if (task) - put_task_struct(task); -err_group_fd: - fput_light(group_file, fput_needed); -err_fd: - put_unused_fd(event_fd); - return err; -} - -/** - * perf_event_create_kernel_counter - * - * @attr: attributes of the counter to create - * @cpu: cpu in which the counter is bound - * @task: task to profile (NULL for percpu) - */ -struct perf_event * -perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, - struct task_struct *task, - perf_overflow_handler_t overflow_handler) -{ - struct perf_event_context *ctx; - struct perf_event *event; - int err; - - /* - * Get the target context (task or percpu): - */ - - event = perf_event_alloc(attr, cpu, task, NULL, NULL, overflow_handler); - if (IS_ERR(event)) { - err = PTR_ERR(event); - goto err; - } - - ctx = find_get_context(event->pmu, task, cpu); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto err_free; - } - - event->filp = NULL; - WARN_ON_ONCE(ctx->parent_ctx); - mutex_lock(&ctx->mutex); - perf_install_in_context(ctx, event, cpu); - ++ctx->generation; - perf_unpin_context(ctx); - mutex_unlock(&ctx->mutex); - - return event; - -err_free: - free_event(event); -err: - return ERR_PTR(err); -} -EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); - -static void sync_child_event(struct perf_event *child_event, - struct task_struct *child) -{ - struct perf_event *parent_event = child_event->parent; - u64 child_val; - - if (child_event->attr.inherit_stat) - perf_event_read_event(child_event, child); - - child_val = perf_event_count(child_event); - - /* - * Add back the child's count to the parent's count: - */ - atomic64_add(child_val, &parent_event->child_count); - atomic64_add(child_event->total_time_enabled, - &parent_event->child_total_time_enabled); - atomic64_add(child_event->total_time_running, - &parent_event->child_total_time_running); - - /* - * Remove this event from the parent's list - */ - WARN_ON_ONCE(parent_event->ctx->parent_ctx); - mutex_lock(&parent_event->child_mutex); - list_del_init(&child_event->child_list); - mutex_unlock(&parent_event->child_mutex); - - /* - * Release the parent event, if this was the last - * reference to it. - */ - fput(parent_event->filp); -} - -static void -__perf_event_exit_task(struct perf_event *child_event, - struct perf_event_context *child_ctx, - struct task_struct *child) -{ - if (child_event->parent) { - raw_spin_lock_irq(&child_ctx->lock); - perf_group_detach(child_event); - raw_spin_unlock_irq(&child_ctx->lock); - } - - perf_remove_from_context(child_event); - - /* - * It can happen that the parent exits first, and has events - * that are still around due to the child reference. These - * events need to be zapped. - */ - if (child_event->parent) { - sync_child_event(child_event, child); - free_event(child_event); - } -} - -static void perf_event_exit_task_context(struct task_struct *child, int ctxn) -{ - struct perf_event *child_event, *tmp; - struct perf_event_context *child_ctx; - unsigned long flags; - - if (likely(!child->perf_event_ctxp[ctxn])) { - perf_event_task(child, NULL, 0); - return; - } - - local_irq_save(flags); - /* - * We can't reschedule here because interrupts are disabled, - * and either child is current or it is a task that can't be - * scheduled, so we are now safe from rescheduling changing - * our context. 
- */ - child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); - task_ctx_sched_out(child_ctx, EVENT_ALL); - - /* - * Take the context lock here so that if find_get_context is - * reading child->perf_event_ctxp, we wait until it has - * incremented the context's refcount before we do put_ctx below. - */ - raw_spin_lock(&child_ctx->lock); - child->perf_event_ctxp[ctxn] = NULL; - /* - * If this context is a clone; unclone it so it can't get - * swapped to another process while we're removing all - * the events from it. - */ - unclone_ctx(child_ctx); - update_context_time(child_ctx); - raw_spin_unlock_irqrestore(&child_ctx->lock, flags); - - /* - * Report the task dead after unscheduling the events so that we - * won't get any samples after PERF_RECORD_EXIT. We can however still - * get a few PERF_RECORD_READ events. - */ - perf_event_task(child, child_ctx, 0); - - /* - * We can recurse on the same lock type through: - * - * __perf_event_exit_task() - * sync_child_event() - * fput(parent_event->filp) - * perf_release() - * mutex_lock(&ctx->mutex) - * - * But since its the parent context it won't be the same instance. - */ - mutex_lock(&child_ctx->mutex); - -again: - list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups, - group_entry) - __perf_event_exit_task(child_event, child_ctx, child); - - list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups, - group_entry) - __perf_event_exit_task(child_event, child_ctx, child); - - /* - * If the last event was a group event, it will have appended all - * its siblings to the list, but we obtained 'tmp' before that which - * will still point to the list head terminating the iteration. - */ - if (!list_empty(&child_ctx->pinned_groups) || - !list_empty(&child_ctx->flexible_groups)) - goto again; - - mutex_unlock(&child_ctx->mutex); - - put_ctx(child_ctx); -} - -/* - * When a child task exits, feed back event values to parent events. - */ -void perf_event_exit_task(struct task_struct *child) -{ - struct perf_event *event, *tmp; - int ctxn; - - mutex_lock(&child->perf_event_mutex); - list_for_each_entry_safe(event, tmp, &child->perf_event_list, - owner_entry) { - list_del_init(&event->owner_entry); - - /* - * Ensure the list deletion is visible before we clear - * the owner, closes a race against perf_release() where - * we need to serialize on the owner->perf_event_mutex. - */ - smp_wmb(); - event->owner = NULL; - } - mutex_unlock(&child->perf_event_mutex); - - for_each_task_context_nr(ctxn) - perf_event_exit_task_context(child, ctxn); -} - -static void perf_free_event(struct perf_event *event, - struct perf_event_context *ctx) -{ - struct perf_event *parent = event->parent; - - if (WARN_ON_ONCE(!parent)) - return; - - mutex_lock(&parent->child_mutex); - list_del_init(&event->child_list); - mutex_unlock(&parent->child_mutex); - - fput(parent->filp); - - perf_group_detach(event); - list_del_event(event, ctx); - free_event(event); -} - -/* - * free an unexposed, unused context as created by inheritance by - * perf_event_init_task below, used by fork() in case of fail. 
- */ -void perf_event_free_task(struct task_struct *task) -{ - struct perf_event_context *ctx; - struct perf_event *event, *tmp; - int ctxn; - - for_each_task_context_nr(ctxn) { - ctx = task->perf_event_ctxp[ctxn]; - if (!ctx) - continue; - - mutex_lock(&ctx->mutex); -again: - list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, - group_entry) - perf_free_event(event, ctx); - - list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, - group_entry) - perf_free_event(event, ctx); - - if (!list_empty(&ctx->pinned_groups) || - !list_empty(&ctx->flexible_groups)) - goto again; - - mutex_unlock(&ctx->mutex); - - put_ctx(ctx); - } -} - -void perf_event_delayed_put(struct task_struct *task) -{ - int ctxn; - - for_each_task_context_nr(ctxn) - WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); -} - -/* - * inherit a event from parent task to child task: - */ -static struct perf_event * -inherit_event(struct perf_event *parent_event, - struct task_struct *parent, - struct perf_event_context *parent_ctx, - struct task_struct *child, - struct perf_event *group_leader, - struct perf_event_context *child_ctx) -{ - struct perf_event *child_event; - unsigned long flags; - - /* - * Instead of creating recursive hierarchies of events, - * we link inherited events back to the original parent, - * which has a filp for sure, which we use as the reference - * count: - */ - if (parent_event->parent) - parent_event = parent_event->parent; - - child_event = perf_event_alloc(&parent_event->attr, - parent_event->cpu, - child, - group_leader, parent_event, - NULL); - if (IS_ERR(child_event)) - return child_event; - get_ctx(child_ctx); - - /* - * Make the child state follow the state of the parent event, - * not its attr.disabled bit. We hold the parent's mutex, - * so we won't race with perf_event_{en, dis}able_family. - */ - if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) - child_event->state = PERF_EVENT_STATE_INACTIVE; - else - child_event->state = PERF_EVENT_STATE_OFF; - - if (parent_event->attr.freq) { - u64 sample_period = parent_event->hw.sample_period; - struct hw_perf_event *hwc = &child_event->hw; - - hwc->sample_period = sample_period; - hwc->last_period = sample_period; - - local64_set(&hwc->period_left, sample_period); - } - - child_event->ctx = child_ctx; - child_event->overflow_handler = parent_event->overflow_handler; - - /* - * Precalculate sample_data sizes - */ - perf_event__header_size(child_event); - perf_event__id_header_size(child_event); - - /* - * Link it up in the child's context: - */ - raw_spin_lock_irqsave(&child_ctx->lock, flags); - add_event_to_ctx(child_event, child_ctx); - raw_spin_unlock_irqrestore(&child_ctx->lock, flags); - - /* - * Get a reference to the parent filp - we will fput it - * when the child event exits. 
This is safe to do because - * we are in the parent and we know that the filp still - * exists and has a nonzero count: - */ - atomic_long_inc(&parent_event->filp->f_count); - - /* - * Link this into the parent event's child list - */ - WARN_ON_ONCE(parent_event->ctx->parent_ctx); - mutex_lock(&parent_event->child_mutex); - list_add_tail(&child_event->child_list, &parent_event->child_list); - mutex_unlock(&parent_event->child_mutex); - - return child_event; -} - -static int inherit_group(struct perf_event *parent_event, - struct task_struct *parent, - struct perf_event_context *parent_ctx, - struct task_struct *child, - struct perf_event_context *child_ctx) -{ - struct perf_event *leader; - struct perf_event *sub; - struct perf_event *child_ctr; - - leader = inherit_event(parent_event, parent, parent_ctx, - child, NULL, child_ctx); - if (IS_ERR(leader)) - return PTR_ERR(leader); - list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { - child_ctr = inherit_event(sub, parent, parent_ctx, - child, leader, child_ctx); - if (IS_ERR(child_ctr)) - return PTR_ERR(child_ctr); - } - return 0; -} - -static int -inherit_task_group(struct perf_event *event, struct task_struct *parent, - struct perf_event_context *parent_ctx, - struct task_struct *child, int ctxn, - int *inherited_all) -{ - int ret; - struct perf_event_context *child_ctx; - - if (!event->attr.inherit) { - *inherited_all = 0; - return 0; - } - - child_ctx = child->perf_event_ctxp[ctxn]; - if (!child_ctx) { - /* - * This is executed from the parent task context, so - * inherit events that have been marked for cloning. - * First allocate and initialize a context for the - * child. - */ - - child_ctx = alloc_perf_context(event->pmu, child); - if (!child_ctx) - return -ENOMEM; - - child->perf_event_ctxp[ctxn] = child_ctx; - } - - ret = inherit_group(event, parent, parent_ctx, - child, child_ctx); - - if (ret) - *inherited_all = 0; - - return ret; -} - -/* - * Initialize the perf_event context in task_struct - */ -int perf_event_init_context(struct task_struct *child, int ctxn) -{ - struct perf_event_context *child_ctx, *parent_ctx; - struct perf_event_context *cloned_ctx; - struct perf_event *event; - struct task_struct *parent = current; - int inherited_all = 1; - unsigned long flags; - int ret = 0; - - if (likely(!parent->perf_event_ctxp[ctxn])) - return 0; - - /* - * If the parent's context is a clone, pin it so it won't get - * swapped under us. - */ - parent_ctx = perf_pin_task_context(parent, ctxn); - - /* - * No need to check if parent_ctx != NULL here; since we saw - * it non-NULL earlier, the only reason for it to become NULL - * is if we exit, and since we're currently in the middle of - * a fork we can't be exiting at the same time. - */ - - /* - * Lock the parent list. No need to lock the child - not PID - * hashed yet and not running, so nobody can access it. - */ - mutex_lock(&parent_ctx->mutex); - - /* - * We dont have to disable NMIs - we are only looking at - * the list, not manipulating it: - */ - list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { - ret = inherit_task_group(event, parent, parent_ctx, - child, ctxn, &inherited_all); - if (ret) - break; - } - - /* - * We can't hold ctx->lock when iterating the ->flexible_group list due - * to allocations, but we need to prevent rotation because - * rotate_ctx() will change the list from interrupt context. 
- */ - raw_spin_lock_irqsave(&parent_ctx->lock, flags); - parent_ctx->rotate_disable = 1; - raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); - - list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { - ret = inherit_task_group(event, parent, parent_ctx, - child, ctxn, &inherited_all); - if (ret) - break; - } - - raw_spin_lock_irqsave(&parent_ctx->lock, flags); - parent_ctx->rotate_disable = 0; - - child_ctx = child->perf_event_ctxp[ctxn]; - - if (child_ctx && inherited_all) { - /* - * Mark the child context as a clone of the parent - * context, or of whatever the parent is a clone of. - * - * Note that if the parent is a clone, the holding of - * parent_ctx->lock avoids it from being uncloned. - */ - cloned_ctx = parent_ctx->parent_ctx; - if (cloned_ctx) { - child_ctx->parent_ctx = cloned_ctx; - child_ctx->parent_gen = parent_ctx->parent_gen; - } else { - child_ctx->parent_ctx = parent_ctx; - child_ctx->parent_gen = parent_ctx->generation; - } - get_ctx(child_ctx->parent_ctx); - } - - raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); - mutex_unlock(&parent_ctx->mutex); - - perf_unpin_context(parent_ctx); - put_ctx(parent_ctx); - - return ret; -} - -/* - * Initialize the perf_event context in task_struct - */ -int perf_event_init_task(struct task_struct *child) -{ - int ctxn, ret; - - memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); - mutex_init(&child->perf_event_mutex); - INIT_LIST_HEAD(&child->perf_event_list); - - for_each_task_context_nr(ctxn) { - ret = perf_event_init_context(child, ctxn); - if (ret) - return ret; - } - - return 0; -} - -static void __init perf_event_init_all_cpus(void) -{ - struct swevent_htable *swhash; - int cpu; - - for_each_possible_cpu(cpu) { - swhash = &per_cpu(swevent_htable, cpu); - mutex_init(&swhash->hlist_mutex); - INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); - } -} - -static void __cpuinit perf_event_init_cpu(int cpu) -{ - struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); - - mutex_lock(&swhash->hlist_mutex); - if (swhash->hlist_refcount > 0) { - struct swevent_hlist *hlist; - - hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); - WARN_ON(!hlist); - rcu_assign_pointer(swhash->swevent_hlist, hlist); - } - mutex_unlock(&swhash->hlist_mutex); -} - -#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC -static void perf_pmu_rotate_stop(struct pmu *pmu) -{ - struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); - - WARN_ON(!irqs_disabled()); - - list_del_init(&cpuctx->rotation_list); -} - -static void __perf_event_exit_context(void *__info) -{ - struct perf_event_context *ctx = __info; - struct perf_event *event, *tmp; - - perf_pmu_rotate_stop(ctx->pmu); - - list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry) - __perf_remove_from_context(event); - list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry) - __perf_remove_from_context(event); -} - -static void perf_event_exit_cpu_context(int cpu) -{ - struct perf_event_context *ctx; - struct pmu *pmu; - int idx; - - idx = srcu_read_lock(&pmus_srcu); - list_for_each_entry_rcu(pmu, &pmus, entry) { - ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; - - mutex_lock(&ctx->mutex); - smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); - mutex_unlock(&ctx->mutex); - } - srcu_read_unlock(&pmus_srcu, idx); -} - -static void perf_event_exit_cpu(int cpu) -{ - struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); - - mutex_lock(&swhash->hlist_mutex); - 
swevent_hlist_release(swhash); - mutex_unlock(&swhash->hlist_mutex); - - perf_event_exit_cpu_context(cpu); -} -#else -static inline void perf_event_exit_cpu(int cpu) { } -#endif - -static int -perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) -{ - int cpu; - - for_each_online_cpu(cpu) - perf_event_exit_cpu(cpu); - - return NOTIFY_OK; -} - -/* - * Run the perf reboot notifier at the very last possible moment so that - * the generic watchdog code runs as long as possible. - */ -static struct notifier_block perf_reboot_notifier = { - .notifier_call = perf_reboot, - .priority = INT_MIN, -}; - -static int __cpuinit -perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - - case CPU_UP_PREPARE: - case CPU_DOWN_FAILED: - perf_event_init_cpu(cpu); - break; - - case CPU_UP_CANCELED: - case CPU_DOWN_PREPARE: - perf_event_exit_cpu(cpu); - break; - - default: - break; - } - - return NOTIFY_OK; -} - -void __init perf_event_init(void) -{ - int ret; - - idr_init(&pmu_idr); - - perf_event_init_all_cpus(); - init_srcu_struct(&pmus_srcu); - perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); - perf_pmu_register(&perf_cpu_clock, NULL, -1); - perf_pmu_register(&perf_task_clock, NULL, -1); - perf_tp_register(); - perf_cpu_notifier(perf_cpu_notify); - register_reboot_notifier(&perf_reboot_notifier); - - ret = init_hw_breakpoint(); - WARN(ret, "hw_breakpoint initialization failed with: %d", ret); -} - -static int __init perf_event_sysfs_init(void) -{ - struct pmu *pmu; - int ret; - - mutex_lock(&pmus_lock); - - ret = bus_register(&pmu_bus); - if (ret) - goto unlock; - - list_for_each_entry(pmu, &pmus, entry) { - if (!pmu->name || pmu->type < 0) - continue; - - ret = pmu_dev_alloc(pmu); - WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); - } - pmu_bus_running = 1; - ret = 0; - -unlock: - mutex_unlock(&pmus_lock); - - return ret; -} -device_initcall(perf_event_sysfs_init); - -#ifdef CONFIG_CGROUP_PERF -static struct cgroup_subsys_state *perf_cgroup_create( - struct cgroup_subsys *ss, struct cgroup *cont) -{ - struct perf_cgroup *jc; - - jc = kzalloc(sizeof(*jc), GFP_KERNEL); - if (!jc) - return ERR_PTR(-ENOMEM); - - jc->info = alloc_percpu(struct perf_cgroup_info); - if (!jc->info) { - kfree(jc); - return ERR_PTR(-ENOMEM); - } - - return &jc->css; -} - -static void perf_cgroup_destroy(struct cgroup_subsys *ss, - struct cgroup *cont) -{ - struct perf_cgroup *jc; - jc = container_of(cgroup_subsys_state(cont, perf_subsys_id), - struct perf_cgroup, css); - free_percpu(jc->info); - kfree(jc); -} - -static int __perf_cgroup_move(void *info) -{ - struct task_struct *task = info; - perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); - return 0; -} - -static void perf_cgroup_move(struct task_struct *task) -{ - task_function_call(task, __perf_cgroup_move, task); -} - -static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, - struct cgroup *old_cgrp, struct task_struct *task, - bool threadgroup) -{ - perf_cgroup_move(task); - if (threadgroup) { - struct task_struct *c; - rcu_read_lock(); - list_for_each_entry_rcu(c, &task->thread_group, thread_group) { - perf_cgroup_move(c); - } - rcu_read_unlock(); - } -} - -static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, - struct cgroup *old_cgrp, struct task_struct *task) -{ - /* - * cgroup_exit() is called in the copy_process() failure path. 
- * Ignore this case since the task hasn't ran yet, this avoids - * trying to poke a half freed task state from generic code. - */ - if (!(task->flags & PF_EXITING)) - return; - - perf_cgroup_move(task); -} - -struct cgroup_subsys perf_subsys = { - .name = "perf_event", - .subsys_id = perf_subsys_id, - .create = perf_cgroup_create, - .destroy = perf_cgroup_destroy, - .exit = perf_cgroup_exit, - .attach = perf_cgroup_attach, -}; -#endif /* CONFIG_CGROUP_PERF */ -- cgit v1.2.3 From 48dbb6dc86ca5d1b2224937d774c7ba98bc3a485 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 3 May 2011 15:26:43 +0200 Subject: hw breakpoints: Move to kernel/events/ As part of the events sybsystem unification, relocate hw_breakpoint.c into its new destination. Cc: Frederic Weisbecker Signed-off-by: Borislav Petkov --- kernel/Makefile | 1 - kernel/events/Makefile | 3 +- kernel/events/hw_breakpoint.c | 659 ++++++++++++++++++++++++++++++++++++++++++ kernel/hw_breakpoint.c | 659 ------------------------------------------ 4 files changed, 661 insertions(+), 661 deletions(-) create mode 100644 kernel/events/hw_breakpoint.c delete mode 100644 kernel/hw_breakpoint.c diff --git a/kernel/Makefile b/kernel/Makefile index 79815306474..e9cf19155b4 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -105,7 +105,6 @@ obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-$(CONFIG_PERF_EVENTS) += events/ -obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o obj-$(CONFIG_PADATA) += padata.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o diff --git a/kernel/events/Makefile b/kernel/events/Makefile index 26c00e4570e..1ce23d3d839 100644 --- a/kernel/events/Makefile +++ b/kernel/events/Makefile @@ -2,4 +2,5 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_core.o = -pg endif -obj-y += core.o +obj-y := core.o +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c new file mode 100644 index 00000000000..086adf25a55 --- /dev/null +++ b/kernel/events/hw_breakpoint.c @@ -0,0 +1,659 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2007 Alan Stern + * Copyright (C) IBM Corporation, 2009 + * Copyright (C) 2009, Frederic Weisbecker + * + * Thanks to Ingo Molnar for his many suggestions. + * + * Authors: Alan Stern + * K.Prasad + * Frederic Weisbecker + */ + +/* + * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, + * using the CPU's debug registers. + * This file contains the arch-independent routines. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +/* + * Constraints data + */ + +/* Number of pinned cpu breakpoints in a cpu */ +static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]); + +/* Number of pinned task breakpoints in a cpu */ +static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]); + +/* Number of non-pinned cpu/task breakpoints in a cpu */ +static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]); + +static int nr_slots[TYPE_MAX]; + +/* Keep track of the breakpoints attached to tasks */ +static LIST_HEAD(bp_task_head); + +static int constraints_initialized; + +/* Gather the number of total pinned and un-pinned bp in a cpuset */ +struct bp_busy_slots { + unsigned int pinned; + unsigned int flexible; +}; + +/* Serialize accesses to the above constraints */ +static DEFINE_MUTEX(nr_bp_mutex); + +__weak int hw_breakpoint_weight(struct perf_event *bp) +{ + return 1; +} + +static inline enum bp_type_idx find_slot_idx(struct perf_event *bp) +{ + if (bp->attr.bp_type & HW_BREAKPOINT_RW) + return TYPE_DATA; + + return TYPE_INST; +} + +/* + * Report the maximum number of pinned breakpoints a task + * have in this cpu + */ +static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) +{ + int i; + unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); + + for (i = nr_slots[type] - 1; i >= 0; i--) { + if (tsk_pinned[i] > 0) + return i + 1; + } + + return 0; +} + +/* + * Count the number of breakpoints of the same type and same task. + * The given event must be not on the list. + */ +static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type) +{ + struct task_struct *tsk = bp->hw.bp_target; + struct perf_event *iter; + int count = 0; + + list_for_each_entry(iter, &bp_task_head, hw.bp_list) { + if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type) + count += hw_breakpoint_weight(iter); + } + + return count; +} + +/* + * Report the number of pinned/un-pinned breakpoints we have in + * a given cpu (cpu > -1) or in all of them (cpu = -1). + */ +static void +fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, + enum bp_type_idx type) +{ + int cpu = bp->cpu; + struct task_struct *tsk = bp->hw.bp_target; + + if (cpu >= 0) { + slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu); + if (!tsk) + slots->pinned += max_task_bp_pinned(cpu, type); + else + slots->pinned += task_bp_pinned(bp, type); + slots->flexible = per_cpu(nr_bp_flexible[type], cpu); + + return; + } + + for_each_online_cpu(cpu) { + unsigned int nr; + + nr = per_cpu(nr_cpu_bp_pinned[type], cpu); + if (!tsk) + nr += max_task_bp_pinned(cpu, type); + else + nr += task_bp_pinned(bp, type); + + if (nr > slots->pinned) + slots->pinned = nr; + + nr = per_cpu(nr_bp_flexible[type], cpu); + + if (nr > slots->flexible) + slots->flexible = nr; + } +} + +/* + * For now, continue to consider flexible as pinned, until we can + * ensure no flexible event can ever be scheduled before a pinned event + * in a same cpu. 
+ */ +static void +fetch_this_slot(struct bp_busy_slots *slots, int weight) +{ + slots->pinned += weight; +} + +/* + * Add a pinned breakpoint for the given task in our constraint table + */ +static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable, + enum bp_type_idx type, int weight) +{ + unsigned int *tsk_pinned; + int old_count = 0; + int old_idx = 0; + int idx = 0; + + old_count = task_bp_pinned(bp, type); + old_idx = old_count - 1; + idx = old_idx + weight; + + /* tsk_pinned[n] is the number of tasks having n breakpoints */ + tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); + if (enable) { + tsk_pinned[idx]++; + if (old_count > 0) + tsk_pinned[old_idx]--; + } else { + tsk_pinned[idx]--; + if (old_count > 0) + tsk_pinned[old_idx]++; + } +} + +/* + * Add/remove the given breakpoint in our constraint table + */ +static void +toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, + int weight) +{ + int cpu = bp->cpu; + struct task_struct *tsk = bp->hw.bp_target; + + /* Pinned counter cpu profiling */ + if (!tsk) { + + if (enable) + per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight; + else + per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight; + return; + } + + /* Pinned counter task profiling */ + + if (!enable) + list_del(&bp->hw.bp_list); + + if (cpu >= 0) { + toggle_bp_task_slot(bp, cpu, enable, type, weight); + } else { + for_each_online_cpu(cpu) + toggle_bp_task_slot(bp, cpu, enable, type, weight); + } + + if (enable) + list_add_tail(&bp->hw.bp_list, &bp_task_head); +} + +/* + * Function to perform processor-specific cleanup during unregistration + */ +__weak void arch_unregister_hw_breakpoint(struct perf_event *bp) +{ + /* + * A weak stub function here for those archs that don't define + * it inside arch/.../kernel/hw_breakpoint.c + */ +} + +/* + * Contraints to check before allowing this new breakpoint counter: + * + * == Non-pinned counter == (Considered as pinned for now) + * + * - If attached to a single cpu, check: + * + * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu) + * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM + * + * -> If there are already non-pinned counters in this cpu, it means + * there is already a free slot for them. + * Otherwise, we check that the maximum number of per task + * breakpoints (for this cpu) plus the number of per cpu breakpoint + * (for this cpu) doesn't cover every registers. + * + * - If attached to every cpus, check: + * + * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *)) + * + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM + * + * -> This is roughly the same, except we check the number of per cpu + * bp for every cpu and we keep the max one. Same for the per tasks + * breakpoints. + * + * + * == Pinned counter == + * + * - If attached to a single cpu, check: + * + * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu) + * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM + * + * -> Same checks as before. But now the nr_bp_flexible, if any, must keep + * one register at least (or they will never be fed). 
+ * + * - If attached to every cpus, check: + * + * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *)) + * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM + */ +static int __reserve_bp_slot(struct perf_event *bp) +{ + struct bp_busy_slots slots = {0}; + enum bp_type_idx type; + int weight; + + /* We couldn't initialize breakpoint constraints on boot */ + if (!constraints_initialized) + return -ENOMEM; + + /* Basic checks */ + if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY || + bp->attr.bp_type == HW_BREAKPOINT_INVALID) + return -EINVAL; + + type = find_slot_idx(bp); + weight = hw_breakpoint_weight(bp); + + fetch_bp_busy_slots(&slots, bp, type); + /* + * Simulate the addition of this breakpoint to the constraints + * and see the result. + */ + fetch_this_slot(&slots, weight); + + /* Flexible counters need to keep at least one slot */ + if (slots.pinned + (!!slots.flexible) > nr_slots[type]) + return -ENOSPC; + + toggle_bp_slot(bp, true, type, weight); + + return 0; +} + +int reserve_bp_slot(struct perf_event *bp) +{ + int ret; + + mutex_lock(&nr_bp_mutex); + + ret = __reserve_bp_slot(bp); + + mutex_unlock(&nr_bp_mutex); + + return ret; +} + +static void __release_bp_slot(struct perf_event *bp) +{ + enum bp_type_idx type; + int weight; + + type = find_slot_idx(bp); + weight = hw_breakpoint_weight(bp); + toggle_bp_slot(bp, false, type, weight); +} + +void release_bp_slot(struct perf_event *bp) +{ + mutex_lock(&nr_bp_mutex); + + arch_unregister_hw_breakpoint(bp); + __release_bp_slot(bp); + + mutex_unlock(&nr_bp_mutex); +} + +/* + * Allow the kernel debugger to reserve breakpoint slots without + * taking a lock using the dbg_* variant of for the reserve and + * release breakpoint slots. + */ +int dbg_reserve_bp_slot(struct perf_event *bp) +{ + if (mutex_is_locked(&nr_bp_mutex)) + return -1; + + return __reserve_bp_slot(bp); +} + +int dbg_release_bp_slot(struct perf_event *bp) +{ + if (mutex_is_locked(&nr_bp_mutex)) + return -1; + + __release_bp_slot(bp); + + return 0; +} + +static int validate_hw_breakpoint(struct perf_event *bp) +{ + int ret; + + ret = arch_validate_hwbkpt_settings(bp); + if (ret) + return ret; + + if (arch_check_bp_in_kernelspace(bp)) { + if (bp->attr.exclude_kernel) + return -EINVAL; + /* + * Don't let unprivileged users set a breakpoint in the trap + * path to avoid trap recursion attacks. 
+ */ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + } + + return 0; +} + +int register_perf_hw_breakpoint(struct perf_event *bp) +{ + int ret; + + ret = reserve_bp_slot(bp); + if (ret) + return ret; + + ret = validate_hw_breakpoint(bp); + + /* if arch_validate_hwbkpt_settings() fails then release bp slot */ + if (ret) + release_bp_slot(bp); + + return ret; +} + +/** + * register_user_hw_breakpoint - register a hardware breakpoint for user space + * @attr: breakpoint attributes + * @triggered: callback to trigger when we hit the breakpoint + * @tsk: pointer to 'task_struct' of the process to which the address belongs + */ +struct perf_event * +register_user_hw_breakpoint(struct perf_event_attr *attr, + perf_overflow_handler_t triggered, + struct task_struct *tsk) +{ + return perf_event_create_kernel_counter(attr, -1, tsk, triggered); +} +EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); + +/** + * modify_user_hw_breakpoint - modify a user-space hardware breakpoint + * @bp: the breakpoint structure to modify + * @attr: new breakpoint attributes + * @triggered: callback to trigger when we hit the breakpoint + * @tsk: pointer to 'task_struct' of the process to which the address belongs + */ +int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) +{ + u64 old_addr = bp->attr.bp_addr; + u64 old_len = bp->attr.bp_len; + int old_type = bp->attr.bp_type; + int err = 0; + + perf_event_disable(bp); + + bp->attr.bp_addr = attr->bp_addr; + bp->attr.bp_type = attr->bp_type; + bp->attr.bp_len = attr->bp_len; + + if (attr->disabled) + goto end; + + err = validate_hw_breakpoint(bp); + if (!err) + perf_event_enable(bp); + + if (err) { + bp->attr.bp_addr = old_addr; + bp->attr.bp_type = old_type; + bp->attr.bp_len = old_len; + if (!bp->attr.disabled) + perf_event_enable(bp); + + return err; + } + +end: + bp->attr.disabled = attr->disabled; + + return 0; +} +EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); + +/** + * unregister_hw_breakpoint - unregister a user-space hardware breakpoint + * @bp: the breakpoint structure to unregister + */ +void unregister_hw_breakpoint(struct perf_event *bp) +{ + if (!bp) + return; + perf_event_release_kernel(bp); +} +EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); + +/** + * register_wide_hw_breakpoint - register a wide breakpoint in the kernel + * @attr: breakpoint attributes + * @triggered: callback to trigger when we hit the breakpoint + * + * @return a set of per_cpu pointers to perf events + */ +struct perf_event * __percpu * +register_wide_hw_breakpoint(struct perf_event_attr *attr, + perf_overflow_handler_t triggered) +{ + struct perf_event * __percpu *cpu_events, **pevent, *bp; + long err; + int cpu; + + cpu_events = alloc_percpu(typeof(*cpu_events)); + if (!cpu_events) + return (void __percpu __force *)ERR_PTR(-ENOMEM); + + get_online_cpus(); + for_each_online_cpu(cpu) { + pevent = per_cpu_ptr(cpu_events, cpu); + bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered); + + *pevent = bp; + + if (IS_ERR(bp)) { + err = PTR_ERR(bp); + goto fail; + } + } + put_online_cpus(); + + return cpu_events; + +fail: + for_each_online_cpu(cpu) { + pevent = per_cpu_ptr(cpu_events, cpu); + if (IS_ERR(*pevent)) + break; + unregister_hw_breakpoint(*pevent); + } + put_online_cpus(); + + free_percpu(cpu_events); + return (void __percpu __force *)ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint); + +/** + * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel + * @cpu_events: the per cpu set of events to unregister + */ 
+void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) +{ + int cpu; + struct perf_event **pevent; + + for_each_possible_cpu(cpu) { + pevent = per_cpu_ptr(cpu_events, cpu); + unregister_hw_breakpoint(*pevent); + } + free_percpu(cpu_events); +} +EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint); + +static struct notifier_block hw_breakpoint_exceptions_nb = { + .notifier_call = hw_breakpoint_exceptions_notify, + /* we need to be notified first */ + .priority = 0x7fffffff +}; + +static void bp_perf_event_destroy(struct perf_event *event) +{ + release_bp_slot(event); +} + +static int hw_breakpoint_event_init(struct perf_event *bp) +{ + int err; + + if (bp->attr.type != PERF_TYPE_BREAKPOINT) + return -ENOENT; + + err = register_perf_hw_breakpoint(bp); + if (err) + return err; + + bp->destroy = bp_perf_event_destroy; + + return 0; +} + +static int hw_breakpoint_add(struct perf_event *bp, int flags) +{ + if (!(flags & PERF_EF_START)) + bp->hw.state = PERF_HES_STOPPED; + + return arch_install_hw_breakpoint(bp); +} + +static void hw_breakpoint_del(struct perf_event *bp, int flags) +{ + arch_uninstall_hw_breakpoint(bp); +} + +static void hw_breakpoint_start(struct perf_event *bp, int flags) +{ + bp->hw.state = 0; +} + +static void hw_breakpoint_stop(struct perf_event *bp, int flags) +{ + bp->hw.state = PERF_HES_STOPPED; +} + +static struct pmu perf_breakpoint = { + .task_ctx_nr = perf_sw_context, /* could eventually get its own */ + + .event_init = hw_breakpoint_event_init, + .add = hw_breakpoint_add, + .del = hw_breakpoint_del, + .start = hw_breakpoint_start, + .stop = hw_breakpoint_stop, + .read = hw_breakpoint_pmu_read, +}; + +int __init init_hw_breakpoint(void) +{ + unsigned int **task_bp_pinned; + int cpu, err_cpu; + int i; + + for (i = 0; i < TYPE_MAX; i++) + nr_slots[i] = hw_breakpoint_slots(i); + + for_each_possible_cpu(cpu) { + for (i = 0; i < TYPE_MAX; i++) { + task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu); + *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i], + GFP_KERNEL); + if (!*task_bp_pinned) + goto err_alloc; + } + } + + constraints_initialized = 1; + + perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT); + + return register_die_notifier(&hw_breakpoint_exceptions_nb); + + err_alloc: + for_each_possible_cpu(err_cpu) { + if (err_cpu == cpu) + break; + for (i = 0; i < TYPE_MAX; i++) + kfree(per_cpu(nr_task_bp_pinned[i], cpu)); + } + + return -ENOMEM; +} + + diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c deleted file mode 100644 index 086adf25a55..00000000000 --- a/kernel/hw_breakpoint.c +++ /dev/null @@ -1,659 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) 2007 Alan Stern - * Copyright (C) IBM Corporation, 2009 - * Copyright (C) 2009, Frederic Weisbecker - * - * Thanks to Ingo Molnar for his many suggestions. 
- * - * Authors: Alan Stern - * K.Prasad - * Frederic Weisbecker - */ - -/* - * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, - * using the CPU's debug registers. - * This file contains the arch-independent routines. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - - -/* - * Constraints data - */ - -/* Number of pinned cpu breakpoints in a cpu */ -static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]); - -/* Number of pinned task breakpoints in a cpu */ -static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]); - -/* Number of non-pinned cpu/task breakpoints in a cpu */ -static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]); - -static int nr_slots[TYPE_MAX]; - -/* Keep track of the breakpoints attached to tasks */ -static LIST_HEAD(bp_task_head); - -static int constraints_initialized; - -/* Gather the number of total pinned and un-pinned bp in a cpuset */ -struct bp_busy_slots { - unsigned int pinned; - unsigned int flexible; -}; - -/* Serialize accesses to the above constraints */ -static DEFINE_MUTEX(nr_bp_mutex); - -__weak int hw_breakpoint_weight(struct perf_event *bp) -{ - return 1; -} - -static inline enum bp_type_idx find_slot_idx(struct perf_event *bp) -{ - if (bp->attr.bp_type & HW_BREAKPOINT_RW) - return TYPE_DATA; - - return TYPE_INST; -} - -/* - * Report the maximum number of pinned breakpoints a task - * have in this cpu - */ -static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type) -{ - int i; - unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); - - for (i = nr_slots[type] - 1; i >= 0; i--) { - if (tsk_pinned[i] > 0) - return i + 1; - } - - return 0; -} - -/* - * Count the number of breakpoints of the same type and same task. - * The given event must be not on the list. - */ -static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type) -{ - struct task_struct *tsk = bp->hw.bp_target; - struct perf_event *iter; - int count = 0; - - list_for_each_entry(iter, &bp_task_head, hw.bp_list) { - if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type) - count += hw_breakpoint_weight(iter); - } - - return count; -} - -/* - * Report the number of pinned/un-pinned breakpoints we have in - * a given cpu (cpu > -1) or in all of them (cpu = -1). - */ -static void -fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, - enum bp_type_idx type) -{ - int cpu = bp->cpu; - struct task_struct *tsk = bp->hw.bp_target; - - if (cpu >= 0) { - slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu); - if (!tsk) - slots->pinned += max_task_bp_pinned(cpu, type); - else - slots->pinned += task_bp_pinned(bp, type); - slots->flexible = per_cpu(nr_bp_flexible[type], cpu); - - return; - } - - for_each_online_cpu(cpu) { - unsigned int nr; - - nr = per_cpu(nr_cpu_bp_pinned[type], cpu); - if (!tsk) - nr += max_task_bp_pinned(cpu, type); - else - nr += task_bp_pinned(bp, type); - - if (nr > slots->pinned) - slots->pinned = nr; - - nr = per_cpu(nr_bp_flexible[type], cpu); - - if (nr > slots->flexible) - slots->flexible = nr; - } -} - -/* - * For now, continue to consider flexible as pinned, until we can - * ensure no flexible event can ever be scheduled before a pinned event - * in a same cpu. 
- */ -static void -fetch_this_slot(struct bp_busy_slots *slots, int weight) -{ - slots->pinned += weight; -} - -/* - * Add a pinned breakpoint for the given task in our constraint table - */ -static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable, - enum bp_type_idx type, int weight) -{ - unsigned int *tsk_pinned; - int old_count = 0; - int old_idx = 0; - int idx = 0; - - old_count = task_bp_pinned(bp, type); - old_idx = old_count - 1; - idx = old_idx + weight; - - /* tsk_pinned[n] is the number of tasks having n breakpoints */ - tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu); - if (enable) { - tsk_pinned[idx]++; - if (old_count > 0) - tsk_pinned[old_idx]--; - } else { - tsk_pinned[idx]--; - if (old_count > 0) - tsk_pinned[old_idx]++; - } -} - -/* - * Add/remove the given breakpoint in our constraint table - */ -static void -toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, - int weight) -{ - int cpu = bp->cpu; - struct task_struct *tsk = bp->hw.bp_target; - - /* Pinned counter cpu profiling */ - if (!tsk) { - - if (enable) - per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight; - else - per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight; - return; - } - - /* Pinned counter task profiling */ - - if (!enable) - list_del(&bp->hw.bp_list); - - if (cpu >= 0) { - toggle_bp_task_slot(bp, cpu, enable, type, weight); - } else { - for_each_online_cpu(cpu) - toggle_bp_task_slot(bp, cpu, enable, type, weight); - } - - if (enable) - list_add_tail(&bp->hw.bp_list, &bp_task_head); -} - -/* - * Function to perform processor-specific cleanup during unregistration - */ -__weak void arch_unregister_hw_breakpoint(struct perf_event *bp) -{ - /* - * A weak stub function here for those archs that don't define - * it inside arch/.../kernel/hw_breakpoint.c - */ -} - -/* - * Contraints to check before allowing this new breakpoint counter: - * - * == Non-pinned counter == (Considered as pinned for now) - * - * - If attached to a single cpu, check: - * - * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu) - * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM - * - * -> If there are already non-pinned counters in this cpu, it means - * there is already a free slot for them. - * Otherwise, we check that the maximum number of per task - * breakpoints (for this cpu) plus the number of per cpu breakpoint - * (for this cpu) doesn't cover every registers. - * - * - If attached to every cpus, check: - * - * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *)) - * + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM - * - * -> This is roughly the same, except we check the number of per cpu - * bp for every cpu and we keep the max one. Same for the per tasks - * breakpoints. - * - * - * == Pinned counter == - * - * - If attached to a single cpu, check: - * - * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu) - * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM - * - * -> Same checks as before. But now the nr_bp_flexible, if any, must keep - * one register at least (or they will never be fed). 
- * - * - If attached to every cpus, check: - * - * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *)) - * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM - */ -static int __reserve_bp_slot(struct perf_event *bp) -{ - struct bp_busy_slots slots = {0}; - enum bp_type_idx type; - int weight; - - /* We couldn't initialize breakpoint constraints on boot */ - if (!constraints_initialized) - return -ENOMEM; - - /* Basic checks */ - if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY || - bp->attr.bp_type == HW_BREAKPOINT_INVALID) - return -EINVAL; - - type = find_slot_idx(bp); - weight = hw_breakpoint_weight(bp); - - fetch_bp_busy_slots(&slots, bp, type); - /* - * Simulate the addition of this breakpoint to the constraints - * and see the result. - */ - fetch_this_slot(&slots, weight); - - /* Flexible counters need to keep at least one slot */ - if (slots.pinned + (!!slots.flexible) > nr_slots[type]) - return -ENOSPC; - - toggle_bp_slot(bp, true, type, weight); - - return 0; -} - -int reserve_bp_slot(struct perf_event *bp) -{ - int ret; - - mutex_lock(&nr_bp_mutex); - - ret = __reserve_bp_slot(bp); - - mutex_unlock(&nr_bp_mutex); - - return ret; -} - -static void __release_bp_slot(struct perf_event *bp) -{ - enum bp_type_idx type; - int weight; - - type = find_slot_idx(bp); - weight = hw_breakpoint_weight(bp); - toggle_bp_slot(bp, false, type, weight); -} - -void release_bp_slot(struct perf_event *bp) -{ - mutex_lock(&nr_bp_mutex); - - arch_unregister_hw_breakpoint(bp); - __release_bp_slot(bp); - - mutex_unlock(&nr_bp_mutex); -} - -/* - * Allow the kernel debugger to reserve breakpoint slots without - * taking a lock using the dbg_* variant of for the reserve and - * release breakpoint slots. - */ -int dbg_reserve_bp_slot(struct perf_event *bp) -{ - if (mutex_is_locked(&nr_bp_mutex)) - return -1; - - return __reserve_bp_slot(bp); -} - -int dbg_release_bp_slot(struct perf_event *bp) -{ - if (mutex_is_locked(&nr_bp_mutex)) - return -1; - - __release_bp_slot(bp); - - return 0; -} - -static int validate_hw_breakpoint(struct perf_event *bp) -{ - int ret; - - ret = arch_validate_hwbkpt_settings(bp); - if (ret) - return ret; - - if (arch_check_bp_in_kernelspace(bp)) { - if (bp->attr.exclude_kernel) - return -EINVAL; - /* - * Don't let unprivileged users set a breakpoint in the trap - * path to avoid trap recursion attacks. 
- */ - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - } - - return 0; -} - -int register_perf_hw_breakpoint(struct perf_event *bp) -{ - int ret; - - ret = reserve_bp_slot(bp); - if (ret) - return ret; - - ret = validate_hw_breakpoint(bp); - - /* if arch_validate_hwbkpt_settings() fails then release bp slot */ - if (ret) - release_bp_slot(bp); - - return ret; -} - -/** - * register_user_hw_breakpoint - register a hardware breakpoint for user space - * @attr: breakpoint attributes - * @triggered: callback to trigger when we hit the breakpoint - * @tsk: pointer to 'task_struct' of the process to which the address belongs - */ -struct perf_event * -register_user_hw_breakpoint(struct perf_event_attr *attr, - perf_overflow_handler_t triggered, - struct task_struct *tsk) -{ - return perf_event_create_kernel_counter(attr, -1, tsk, triggered); -} -EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); - -/** - * modify_user_hw_breakpoint - modify a user-space hardware breakpoint - * @bp: the breakpoint structure to modify - * @attr: new breakpoint attributes - * @triggered: callback to trigger when we hit the breakpoint - * @tsk: pointer to 'task_struct' of the process to which the address belongs - */ -int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) -{ - u64 old_addr = bp->attr.bp_addr; - u64 old_len = bp->attr.bp_len; - int old_type = bp->attr.bp_type; - int err = 0; - - perf_event_disable(bp); - - bp->attr.bp_addr = attr->bp_addr; - bp->attr.bp_type = attr->bp_type; - bp->attr.bp_len = attr->bp_len; - - if (attr->disabled) - goto end; - - err = validate_hw_breakpoint(bp); - if (!err) - perf_event_enable(bp); - - if (err) { - bp->attr.bp_addr = old_addr; - bp->attr.bp_type = old_type; - bp->attr.bp_len = old_len; - if (!bp->attr.disabled) - perf_event_enable(bp); - - return err; - } - -end: - bp->attr.disabled = attr->disabled; - - return 0; -} -EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); - -/** - * unregister_hw_breakpoint - unregister a user-space hardware breakpoint - * @bp: the breakpoint structure to unregister - */ -void unregister_hw_breakpoint(struct perf_event *bp) -{ - if (!bp) - return; - perf_event_release_kernel(bp); -} -EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); - -/** - * register_wide_hw_breakpoint - register a wide breakpoint in the kernel - * @attr: breakpoint attributes - * @triggered: callback to trigger when we hit the breakpoint - * - * @return a set of per_cpu pointers to perf events - */ -struct perf_event * __percpu * -register_wide_hw_breakpoint(struct perf_event_attr *attr, - perf_overflow_handler_t triggered) -{ - struct perf_event * __percpu *cpu_events, **pevent, *bp; - long err; - int cpu; - - cpu_events = alloc_percpu(typeof(*cpu_events)); - if (!cpu_events) - return (void __percpu __force *)ERR_PTR(-ENOMEM); - - get_online_cpus(); - for_each_online_cpu(cpu) { - pevent = per_cpu_ptr(cpu_events, cpu); - bp = perf_event_create_kernel_counter(attr, cpu, NULL, triggered); - - *pevent = bp; - - if (IS_ERR(bp)) { - err = PTR_ERR(bp); - goto fail; - } - } - put_online_cpus(); - - return cpu_events; - -fail: - for_each_online_cpu(cpu) { - pevent = per_cpu_ptr(cpu_events, cpu); - if (IS_ERR(*pevent)) - break; - unregister_hw_breakpoint(*pevent); - } - put_online_cpus(); - - free_percpu(cpu_events); - return (void __percpu __force *)ERR_PTR(err); -} -EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint); - -/** - * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel - * @cpu_events: the per cpu set of events to unregister - */ 
-void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) -{ - int cpu; - struct perf_event **pevent; - - for_each_possible_cpu(cpu) { - pevent = per_cpu_ptr(cpu_events, cpu); - unregister_hw_breakpoint(*pevent); - } - free_percpu(cpu_events); -} -EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint); - -static struct notifier_block hw_breakpoint_exceptions_nb = { - .notifier_call = hw_breakpoint_exceptions_notify, - /* we need to be notified first */ - .priority = 0x7fffffff -}; - -static void bp_perf_event_destroy(struct perf_event *event) -{ - release_bp_slot(event); -} - -static int hw_breakpoint_event_init(struct perf_event *bp) -{ - int err; - - if (bp->attr.type != PERF_TYPE_BREAKPOINT) - return -ENOENT; - - err = register_perf_hw_breakpoint(bp); - if (err) - return err; - - bp->destroy = bp_perf_event_destroy; - - return 0; -} - -static int hw_breakpoint_add(struct perf_event *bp, int flags) -{ - if (!(flags & PERF_EF_START)) - bp->hw.state = PERF_HES_STOPPED; - - return arch_install_hw_breakpoint(bp); -} - -static void hw_breakpoint_del(struct perf_event *bp, int flags) -{ - arch_uninstall_hw_breakpoint(bp); -} - -static void hw_breakpoint_start(struct perf_event *bp, int flags) -{ - bp->hw.state = 0; -} - -static void hw_breakpoint_stop(struct perf_event *bp, int flags) -{ - bp->hw.state = PERF_HES_STOPPED; -} - -static struct pmu perf_breakpoint = { - .task_ctx_nr = perf_sw_context, /* could eventually get its own */ - - .event_init = hw_breakpoint_event_init, - .add = hw_breakpoint_add, - .del = hw_breakpoint_del, - .start = hw_breakpoint_start, - .stop = hw_breakpoint_stop, - .read = hw_breakpoint_pmu_read, -}; - -int __init init_hw_breakpoint(void) -{ - unsigned int **task_bp_pinned; - int cpu, err_cpu; - int i; - - for (i = 0; i < TYPE_MAX; i++) - nr_slots[i] = hw_breakpoint_slots(i); - - for_each_possible_cpu(cpu) { - for (i = 0; i < TYPE_MAX; i++) { - task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu); - *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i], - GFP_KERNEL); - if (!*task_bp_pinned) - goto err_alloc; - } - } - - constraints_initialized = 1; - - perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT); - - return register_die_notifier(&hw_breakpoint_exceptions_nb); - - err_alloc: - for_each_possible_cpu(err_cpu) { - if (err_cpu == cpu) - break; - for (i = 0; i < TYPE_MAX; i++) - kfree(per_cpu(nr_task_bp_pinned[i], cpu)); - } - - return -ENOMEM; -} - - -- cgit v1.2.3 From e7e7ee2eab2080248084d71fe0a115ab745eb2aa Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 4 May 2011 08:42:29 +0200 Subject: perf events: Clean up definitions and initializers, update copyrights Fix a few inconsistent style bits that were added over the past few months. 
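As a rough illustration of the target style (a made-up structure, not taken from the patch): designated initializers with their values aligned in one column, and function-pointer members declared with no space before the parameter list.

static int example_init(void)
{
	return 0;
}

static void example_exit(void)
{
}

struct example_ops {
	int	(*init)(void);		/* no space before the parameter list */
	void	(*exit)(void);
	int	priority;
};

static struct example_ops example_ops = {
	.init		= example_init,	/* values aligned in one column */
	.exit		= example_exit,
	.priority	= 0,
};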
Cc: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-yv4hwf9yhnzoada8pcpb3a97@git.kernel.org Signed-off-by: Ingo Molnar --- include/linux/perf_event.h | 96 +++++++++++++++++++++------------------------- kernel/events/core.c | 40 +++++++++---------- 2 files changed, 64 insertions(+), 72 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9eec53d9737..207c16976a1 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -2,8 +2,8 @@ * Performance events: * * Copyright (C) 2008-2009, Thomas Gleixner - * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra * * Data type definitions, declarations, prototypes. * @@ -468,9 +468,9 @@ enum perf_callchain_context { PERF_CONTEXT_MAX = (__u64)-4095, }; -#define PERF_FLAG_FD_NO_GROUP (1U << 0) -#define PERF_FLAG_FD_OUTPUT (1U << 1) -#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ +#define PERF_FLAG_FD_NO_GROUP (1U << 0) +#define PERF_FLAG_FD_OUTPUT (1U << 1) +#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */ #ifdef __KERNEL__ /* @@ -484,9 +484,9 @@ enum perf_callchain_context { #endif struct perf_guest_info_callbacks { - int (*is_in_guest) (void); - int (*is_user_mode) (void); - unsigned long (*get_guest_ip) (void); + int (*is_in_guest)(void); + int (*is_user_mode)(void); + unsigned long (*get_guest_ip)(void); }; #ifdef CONFIG_HAVE_HW_BREAKPOINT @@ -652,19 +652,19 @@ struct pmu { * Start the transaction, after this ->add() doesn't need to * do schedulability tests. */ - void (*start_txn) (struct pmu *pmu); /* optional */ + void (*start_txn) (struct pmu *pmu); /* optional */ /* * If ->start_txn() disabled the ->add() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. */ - int (*commit_txn) (struct pmu *pmu); /* optional */ + int (*commit_txn) (struct pmu *pmu); /* optional */ /* * Will cancel the transaction, assumes ->del() is called * for each successful ->add() during the transaction. */ - void (*cancel_txn) (struct pmu *pmu); /* optional */ + void (*cancel_txn) (struct pmu *pmu); /* optional */ }; /** @@ -712,15 +712,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, int, struct pt_regs *regs); enum perf_group_flag { - PERF_GROUP_SOFTWARE = 0x1, + PERF_GROUP_SOFTWARE = 0x1, }; -#define SWEVENT_HLIST_BITS 8 -#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) +#define SWEVENT_HLIST_BITS 8 +#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) struct swevent_hlist { - struct hlist_head heads[SWEVENT_HLIST_SIZE]; - struct rcu_head rcu_head; + struct hlist_head heads[SWEVENT_HLIST_SIZE]; + struct rcu_head rcu_head; }; #define PERF_ATTACH_CONTEXT 0x01 @@ -733,13 +733,13 @@ struct swevent_hlist { * This is a per-cpu dynamically allocated data structure. */ struct perf_cgroup_info { - u64 time; - u64 timestamp; + u64 time; + u64 timestamp; }; struct perf_cgroup { - struct cgroup_subsys_state css; - struct perf_cgroup_info *info; /* timing info, one per cpu */ + struct cgroup_subsys_state css; + struct perf_cgroup_info *info; /* timing info, one per cpu */ }; #endif @@ -923,7 +923,7 @@ struct perf_event_context { /* * Number of contexts where an event can trigger: - * task, softirq, hardirq, nmi. + * task, softirq, hardirq, nmi. 
*/ #define PERF_NR_CONTEXTS 4 @@ -1001,8 +1001,7 @@ struct perf_sample_data { struct perf_raw_record *raw; }; -static inline -void perf_sample_data_init(struct perf_sample_data *data, u64 addr) +static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) { data->addr = addr; data->raw = NULL; @@ -1039,8 +1038,7 @@ extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); #ifndef perf_arch_fetch_caller_regs -static inline void -perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } +static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } #endif /* @@ -1080,8 +1078,7 @@ static inline void perf_event_task_sched_in(struct task_struct *task) __perf_event_task_sched_in(task); } -static inline -void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) +static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) { perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); @@ -1099,14 +1096,10 @@ extern void perf_event_fork(struct task_struct *tsk); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); -extern void perf_callchain_user(struct perf_callchain_entry *entry, - struct pt_regs *regs); -extern void perf_callchain_kernel(struct perf_callchain_entry *entry, - struct pt_regs *regs); - +extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); +extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); -static inline void -perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) +static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) { if (entry->nr < PERF_MAX_STACK_DEPTH) entry->ip[entry->nr++] = ip; @@ -1142,9 +1135,9 @@ extern void perf_tp_event(u64 addr, u64 count, void *record, extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags -#define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ - PERF_RECORD_MISC_KERNEL) -#define perf_instruction_pointer(regs) instruction_pointer(regs) +# define perf_misc_flags(regs) \ + (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) +# define perf_instruction_pointer(regs) instruction_pointer(regs) #endif extern int perf_output_begin(struct perf_output_handle *handle, @@ -1179,9 +1172,9 @@ static inline void perf_bp_event(struct perf_event *event, void *data) { } static inline int perf_register_guest_info_callbacks -(struct perf_guest_info_callbacks *callbacks) { return 0; } +(struct perf_guest_info_callbacks *callbacks) { return 0; } static inline int perf_unregister_guest_info_callbacks -(struct perf_guest_info_callbacks *callbacks) { return 0; } +(struct perf_guest_info_callbacks *callbacks) { return 0; } static inline void perf_event_mmap(struct vm_area_struct *vma) { } static inline void perf_event_comm(struct task_struct *tsk) { } @@ -1194,23 +1187,22 @@ static inline void perf_event_disable(struct perf_event *event) { } static inline void perf_event_task_tick(void) { } #endif -#define perf_output_put(handle, x) \ - perf_output_copy((handle), &(x), sizeof(x)) +#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) /* * This has to have a higher priority than migration_notifier in sched.c. 
*/ -#define perf_cpu_notifier(fn) \ -do { \ - static struct notifier_block fn##_nb __cpuinitdata = \ - { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ - fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ - (void *)(unsigned long)smp_processor_id()); \ - fn(&fn##_nb, (unsigned long)CPU_STARTING, \ - (void *)(unsigned long)smp_processor_id()); \ - fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ - (void *)(unsigned long)smp_processor_id()); \ - register_cpu_notifier(&fn##_nb); \ +#define perf_cpu_notifier(fn) \ +do { \ + static struct notifier_block fn##_nb __cpuinitdata = \ + { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ + fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_STARTING, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ + (void *)(unsigned long)smp_processor_id()); \ + register_cpu_notifier(&fn##_nb); \ } while (0) #endif /* __KERNEL__ */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 440bc485bbf..0fc34a370ba 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2,8 +2,8 @@ * Performance events core code: * * Copyright (C) 2008 Thomas Gleixner - * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar - * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra * Copyright © 2009 Paul Mackerras, IBM Corp. * * For licensing details see kernel-base/COPYING @@ -39,10 +39,10 @@ #include struct remote_function_call { - struct task_struct *p; - int (*func)(void *info); - void *info; - int ret; + struct task_struct *p; + int (*func)(void *info); + void *info; + int ret; }; static void remote_function(void *data) @@ -76,10 +76,10 @@ static int task_function_call(struct task_struct *p, int (*func) (void *info), void *info) { struct remote_function_call data = { - .p = p, - .func = func, - .info = info, - .ret = -ESRCH, /* No such (running) process */ + .p = p, + .func = func, + .info = info, + .ret = -ESRCH, /* No such (running) process */ }; if (task_curr(p)) @@ -100,10 +100,10 @@ task_function_call(struct task_struct *p, int (*func) (void *info), void *info) static int cpu_function_call(int cpu, int (*func) (void *info), void *info) { struct remote_function_call data = { - .p = NULL, - .func = func, - .info = info, - .ret = -ENXIO, /* No such CPU */ + .p = NULL, + .func = func, + .info = info, + .ret = -ENXIO, /* No such CPU */ }; smp_call_function_single(cpu, remote_function, &data, 1); @@ -7445,11 +7445,11 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, } struct cgroup_subsys perf_subsys = { - .name = "perf_event", - .subsys_id = perf_subsys_id, - .create = perf_cgroup_create, - .destroy = perf_cgroup_destroy, - .exit = perf_cgroup_exit, - .attach = perf_cgroup_attach, + .name = "perf_event", + .subsys_id = perf_subsys_id, + .create = perf_cgroup_create, + .destroy = perf_cgroup_destroy, + .exit = perf_cgroup_exit, + .attach = perf_cgroup_attach, }; #endif /* CONFIG_CGROUP_PERF */ -- cgit v1.2.3 From e04d1b23f9706186187dcb0be1a752e48dcc540b Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Fri, 6 May 2011 07:14:02 +0000 Subject: perf events, x86: Add SandyBridge stalled-cycles-frontend/backend events Extend the Intel SandyBridge PMU driver with definitions for generic front-end and back-end stall events. 
( As commit 3011203 "perf events, x86: Add Westmere stalled-cycles-frontend/backend events" says, these are only approximations. ) Signed-off-by: Lin Ming Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1304666042-17577-1-git-send-email-ming.m.lin@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index e61539b07d2..7cf2ec59c81 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1474,6 +1474,12 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_events; + + /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; + /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1; + pr_cont("SandyBridge events, "); break; -- cgit v1.2.3 From 2b348a77981227c6b64fb9cf19f7c711a6806bc9 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Fri, 29 Apr 2011 08:41:57 +0000 Subject: perf probe: Fix the missed parameter initialization pubname_callback_param::found should be initialized to 0 in fastpath lookup, the structure is on the stack and uninitialized otherwise. Signed-off-by: Lin Ming Cc: Arnaldo Carvalho de Melo Link: http://lkml.kernel.org/r/1304066518-30420-2-git-send-email-ming.m.lin@intel.com Signed-off-by: Ingo Molnar --- tools/perf/util/probe-finder.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index a7c7145a8d4..3b9d0b800d5 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1538,6 +1538,7 @@ static int find_probes(int fd, struct probe_finder *pf) .file = pp->file, .cu_die = &pf->cu_die, .sp_die = &pf->sp_die, + .found = 0, }; struct dwarf_callback_param probe_param = { .data = pf, -- cgit v1.2.3 From 449a66fd1fa75d36dca917704827c40c8f416bca Mon Sep 17 00:00:00 2001 From: Richard Weinberger Date: Thu, 12 May 2011 15:11:12 +0200 Subject: x86: Remove warning and warning_symbol from struct stacktrace_ops Both warning and warning_symbol are nowhere used. Let's get rid of them. Signed-off-by: Richard Weinberger Cc: Oleg Nesterov Cc: Andrew Morton Cc: Huang Ying Cc: Soeren Sandmann Pedersen Cc: Namhyung Kim Cc: x86 Cc: H. 
Peter Anvin Cc: Thomas Gleixner Cc: Robert Richter Cc: Paul Mundt Link: http://lkml.kernel.org/r/1305205872-10321-2-git-send-email-richard@nod.at Signed-off-by: Frederic Weisbecker --- arch/x86/include/asm/stacktrace.h | 3 --- arch/x86/kernel/cpu/perf_event.c | 13 ------------- arch/x86/kernel/dumpstack.c | 16 ---------------- arch/x86/kernel/stacktrace.c | 13 ------------- arch/x86/oprofile/backtrace.c | 13 ------------- 5 files changed, 58 deletions(-) diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index d7e89c83645..70bbe39043a 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -37,9 +37,6 @@ print_context_stack_bp(struct thread_info *tinfo, /* Generic stack tracer with callbacks */ struct stacktrace_ops { - void (*warning)(void *data, char *msg); - /* msg must contain %s for the symbol */ - void (*warning_symbol)(void *data, char *msg, unsigned long symbol); void (*address)(void *data, unsigned long address, int reliable); /* On negative return stop dumping */ int (*stack)(void *data, char *name); diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 0de6b2b31f6..3a0338b4b17 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1773,17 +1773,6 @@ static struct pmu pmu = { * callchain support */ -static void -backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) -{ - /* Ignore warnings */ -} - -static void backtrace_warning(void *data, char *msg) -{ - /* Ignore warnings */ -} - static int backtrace_stack(void *data, char *name) { return 0; @@ -1797,8 +1786,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops backtrace_ops = { - .warning = backtrace_warning, - .warning_symbol = backtrace_warning_symbol, .stack = backtrace_stack, .address = backtrace_address, .walk_stack = print_context_stack_bp, diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index e2a3f0606da..f478ff6877e 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -135,20 +135,6 @@ print_context_stack_bp(struct thread_info *tinfo, } EXPORT_SYMBOL_GPL(print_context_stack_bp); - -static void -print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) -{ - printk(data); - print_symbol(msg, symbol); - printk("\n"); -} - -static void print_trace_warning(void *data, char *msg) -{ - printk("%s%s\n", (char *)data, msg); -} - static int print_trace_stack(void *data, char *name) { printk("%s <%s> ", (char *)data, name); @@ -166,8 +152,6 @@ static void print_trace_address(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops print_trace_ops = { - .warning = print_trace_warning, - .warning_symbol = print_trace_warning_symbol, .stack = print_trace_stack, .address = print_trace_address, .walk_stack = print_context_stack, diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 6515733a289..55d9bc03f69 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -9,15 +9,6 @@ #include #include -static void save_stack_warning(void *data, char *msg) -{ -} - -static void -save_stack_warning_symbol(void *data, char *msg, unsigned long symbol) -{ -} - static int save_stack_stack(void *data, char *name) { return 0; @@ -53,16 +44,12 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable) } static const struct stacktrace_ops save_stack_ops = { - .warning = save_stack_warning, - 
.warning_symbol = save_stack_warning_symbol, .stack = save_stack_stack, .address = save_stack_address, .walk_stack = print_context_stack, }; static const struct stacktrace_ops save_stack_ops_nosched = { - .warning = save_stack_warning, - .warning_symbol = save_stack_warning_symbol, .stack = save_stack_stack, .address = save_stack_address_nosched, .walk_stack = print_context_stack, diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c index 2d49d4e19a3..a5b64ab4cd6 100644 --- a/arch/x86/oprofile/backtrace.c +++ b/arch/x86/oprofile/backtrace.c @@ -16,17 +16,6 @@ #include #include -static void backtrace_warning_symbol(void *data, char *msg, - unsigned long symbol) -{ - /* Ignore warnings */ -} - -static void backtrace_warning(void *data, char *msg) -{ - /* Ignore warnings */ -} - static int backtrace_stack(void *data, char *name) { /* Yes, we want all stacks */ @@ -42,8 +31,6 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) } static struct stacktrace_ops backtrace_ops = { - .warning = backtrace_warning, - .warning_symbol = backtrace_warning_symbol, .stack = backtrace_stack, .address = backtrace_address, .walk_stack = print_context_stack, -- cgit v1.2.3 From dd5477ff3ba978892014ea5f988cb1bf04aa505e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 6 Apr 2011 13:21:17 -0400 Subject: ftrace/trivial: Clean up recordmcount.c to use Linux style comparisons The Linux ftrace subsystem style for comparing is: var == 1 var > 0 and not: 1 == var 0 < var It is considered that Linux developers are smart enough not to do the if (var = 1) mistake. Cc: John Reiser Link: http://lkml.kernel.org/r/20110421023737.290712238@goodmis.org Signed-off-by: Steven Rostedt --- scripts/recordmcount.c | 48 ++++++++++++++++++++++++------------------------ scripts/recordmcount.h | 16 ++++++++-------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index f9f6f52db77..37afe0ecacb 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -78,7 +78,7 @@ static off_t ulseek(int const fd, off_t const offset, int const whence) { off_t const w = lseek(fd, offset, whence); - if ((off_t)-1 == w) { + if (w == (off_t)-1) { perror("lseek"); fail_file(); } @@ -111,7 +111,7 @@ static void * umalloc(size_t size) { void *const addr = malloc(size); - if (0 == addr) { + if (addr == 0) { fprintf(stderr, "malloc failed: %zu bytes\n", size); fail_file(); } @@ -136,7 +136,7 @@ static void *mmap_file(char const *fname) void *addr; fd_map = open(fname, O_RDWR); - if (0 > fd_map || 0 > fstat(fd_map, &sb)) { + if (fd_map < 0 || fstat(fd_map, &sb) < 0) { perror(fname); fail_file(); } @@ -147,7 +147,7 @@ static void *mmap_file(char const *fname) addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd_map, 0); mmap_failed = 0; - if (MAP_FAILED == addr) { + if (addr == MAP_FAILED) { mmap_failed = 1; addr = umalloc(sb.st_size); uread(fd_map, addr, sb.st_size); @@ -206,12 +206,12 @@ static uint32_t (*w2)(uint16_t); static int is_mcounted_section_name(char const *const txtname) { - return 0 == strcmp(".text", txtname) || - 0 == strcmp(".ref.text", txtname) || - 0 == strcmp(".sched.text", txtname) || - 0 == strcmp(".spinlock.text", txtname) || - 0 == strcmp(".irqentry.text", txtname) || - 0 == strcmp(".text.unlikely", txtname); + return strcmp(".text", txtname) == 0 || + strcmp(".ref.text", txtname) == 0 || + strcmp(".sched.text", txtname) == 0 || + strcmp(".spinlock.text", txtname) == 0 || + strcmp(".irqentry.text", 
txtname) == 0 || + strcmp(".text.unlikely", txtname) == 0; } /* 32 bit and 64 bit are very similar */ @@ -270,7 +270,7 @@ do_file(char const *const fname) fail_file(); } break; case ELFDATA2LSB: { - if (1 != *(unsigned char const *)&endian) { + if (*(unsigned char const *)&endian != 1) { /* main() is big endian, file.o is little endian. */ w = w4rev; w2 = w2rev; @@ -278,7 +278,7 @@ do_file(char const *const fname) } } break; case ELFDATA2MSB: { - if (0 != *(unsigned char const *)&endian) { + if (*(unsigned char const *)&endian != 0) { /* main() is little endian, file.o is big endian. */ w = w4rev; w2 = w2rev; @@ -286,9 +286,9 @@ do_file(char const *const fname) } } break; } /* end switch */ - if (0 != memcmp(ELFMAG, ehdr->e_ident, SELFMAG) - || ET_REL != w2(ehdr->e_type) - || EV_CURRENT != ehdr->e_ident[EI_VERSION]) { + if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 + || w2(ehdr->e_type) != ET_REL + || ehdr->e_ident[EI_VERSION] != EV_CURRENT) { fprintf(stderr, "unrecognized ET_REL file %s\n", fname); fail_file(); } @@ -321,15 +321,15 @@ do_file(char const *const fname) fail_file(); } break; case ELFCLASS32: { - if (sizeof(Elf32_Ehdr) != w2(ehdr->e_ehsize) - || sizeof(Elf32_Shdr) != w2(ehdr->e_shentsize)) { + if (w2(ehdr->e_ehsize) != sizeof(Elf32_Ehdr) + || w2(ehdr->e_shentsize) != sizeof(Elf32_Shdr)) { fprintf(stderr, "unrecognized ET_REL file: %s\n", fname); fail_file(); } - if (EM_S390 == w2(ehdr->e_machine)) + if (w2(ehdr->e_machine) == EM_S390) reltype = R_390_32; - if (EM_MIPS == w2(ehdr->e_machine)) { + if (w2(ehdr->e_machine) == EM_MIPS) { reltype = R_MIPS_32; is_fake_mcount32 = MIPS32_is_fake_mcount; } @@ -337,15 +337,15 @@ do_file(char const *const fname) } break; case ELFCLASS64: { Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr; - if (sizeof(Elf64_Ehdr) != w2(ghdr->e_ehsize) - || sizeof(Elf64_Shdr) != w2(ghdr->e_shentsize)) { + if (w2(ghdr->e_ehsize) != sizeof(Elf64_Ehdr) + || w2(ghdr->e_shentsize) != sizeof(Elf64_Shdr)) { fprintf(stderr, "unrecognized ET_REL file: %s\n", fname); fail_file(); } - if (EM_S390 == w2(ghdr->e_machine)) + if (w2(ghdr->e_machine) == EM_S390) reltype = R_390_64; - if (EM_MIPS == w2(ghdr->e_machine)) { + if (w2(ghdr->e_machine) == EM_MIPS) { reltype = R_MIPS_64; Elf64_r_sym = MIPS64_r_sym; Elf64_r_info = MIPS64_r_info; @@ -371,7 +371,7 @@ main(int argc, char const *argv[]) } /* Process each file in turn, allowing deep failure. */ - for (--argc, ++argv; 0 < argc; --argc, ++argv) { + for (--argc, ++argv; argc > 0; --argc, ++argv) { int const sjval = setjmp(jmpenv); int len; diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index baf187bee98..ac7b3303cb3 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -275,12 +275,12 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, Elf_Sym const *const symp = &sym0[Elf_r_sym(relp)]; char const *symname = &str0[w(symp->st_name)]; - char const *mcount = '_' == gpfx ? "_mcount" : "mcount"; + char const *mcount = gpfx == '_' ? "_mcount" : "mcount"; - if ('.' 
== symname[0]) + if (symname[0] == '.') ++symname; /* ppc64 hack */ - if (0 == strcmp(mcount, symname) || - (altmcount && 0 == strcmp(altmcount, symname))) + if (strcmp(mcount, symname) == 0 || + (altmcount && strcmp(altmcount, symname) == 0)) mcountsym = Elf_r_sym(relp); } @@ -290,7 +290,7 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, mrelp->r_offset = _w(offbase + ((void *)mlocp - (void *)mloc0)); Elf_r_info(mrelp, recsym, reltype); - if (sizeof(Elf_Rela) == rel_entsize) { + if (rel_entsize == sizeof(Elf_Rela)) { ((Elf_Rela *)mrelp)->r_addend = addend; *mlocp++ = 0; } else @@ -354,12 +354,12 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */ Elf_Shdr const *const txthdr = &shdr0[w(relhdr->sh_info)]; char const *const txtname = &shstrtab[w(txthdr->sh_name)]; - if (0 == strcmp("__mcount_loc", txtname)) { + if (strcmp("__mcount_loc", txtname) == 0) { fprintf(stderr, "warning: __mcount_loc already exists: %s\n", fname); succeed_file(); } - if (SHT_PROGBITS != w(txthdr->sh_type) || + if (w(txthdr->sh_type) != SHT_PROGBITS || !is_mcounted_section_name(txtname)) return NULL; return txtname; @@ -370,7 +370,7 @@ static char const *has_rel_mcount(Elf_Shdr const *const relhdr, char const *const shstrtab, char const *const fname) { - if (SHT_REL != w(relhdr->sh_type) && SHT_RELA != w(relhdr->sh_type)) + if (w(relhdr->sh_type) != SHT_REL && w(relhdr->sh_type) != SHT_RELA) return NULL; return __has_rel_mcount(relhdr, shdr0, shstrtab, fname); } -- cgit v1.2.3 From e90b0c8bf211958a296d60369fecd51b35864407 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 6 Apr 2011 13:32:24 -0400 Subject: ftrace/trivial: Clean up record mcount to use Linux switch style The Linux style for switch statements is: switch (var) { case x: [...] break; } Not: switch (var) { case x: { [...] } break; Cc: John Reiser Link: http://lkml.kernel.org/r/20110421023737.523968644@goodmis.org Signed-off-by: Steven Rostedt --- scripts/recordmcount.c | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 37afe0ecacb..4ebd8399cb3 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -264,27 +264,27 @@ do_file(char const *const fname) w8 = w8nat; switch (ehdr->e_ident[EI_DATA]) { static unsigned int const endian = 1; - default: { + default: fprintf(stderr, "unrecognized ELF data encoding %d: %s\n", ehdr->e_ident[EI_DATA], fname); fail_file(); - } break; - case ELFDATA2LSB: { + break; + case ELFDATA2LSB: if (*(unsigned char const *)&endian != 1) { /* main() is big endian, file.o is little endian. */ w = w4rev; w2 = w2rev; w8 = w8rev; } - } break; - case ELFDATA2MSB: { + break; + case ELFDATA2MSB: if (*(unsigned char const *)&endian != 0) { /* main() is little endian, file.o is big endian. 
*/ w = w4rev; w2 = w2rev; w8 = w8rev; } - } break; + break; } /* end switch */ if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 || w2(ehdr->e_type) != ET_REL @@ -295,11 +295,11 @@ do_file(char const *const fname) gpfx = 0; switch (w2(ehdr->e_machine)) { - default: { + default: fprintf(stderr, "unrecognized e_machine %d %s\n", w2(ehdr->e_machine), fname); fail_file(); - } break; + break; case EM_386: reltype = R_386_32; break; case EM_ARM: reltype = R_ARM_ABS32; altmcount = "__gnu_mcount_nc"; @@ -315,12 +315,12 @@ do_file(char const *const fname) } /* end switch */ switch (ehdr->e_ident[EI_CLASS]) { - default: { + default: fprintf(stderr, "unrecognized ELF class %d %s\n", ehdr->e_ident[EI_CLASS], fname); fail_file(); - } break; - case ELFCLASS32: { + break; + case ELFCLASS32: if (w2(ehdr->e_ehsize) != sizeof(Elf32_Ehdr) || w2(ehdr->e_shentsize) != sizeof(Elf32_Shdr)) { fprintf(stderr, @@ -334,7 +334,7 @@ do_file(char const *const fname) is_fake_mcount32 = MIPS32_is_fake_mcount; } do32(ehdr, fname, reltype); - } break; + break; case ELFCLASS64: { Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr; if (w2(ghdr->e_ehsize) != sizeof(Elf64_Ehdr) @@ -352,7 +352,8 @@ do_file(char const *const fname) is_fake_mcount64 = MIPS64_is_fake_mcount; } do64(ghdr, fname, reltype); - } break; + break; + } } /* end switch */ cleanup(); @@ -386,23 +387,23 @@ main(int argc, char const *argv[]) continue; switch (sjval) { - default: { + default: fprintf(stderr, "internal error: %s\n", argv[0]); exit(1); - } break; - case SJ_SETJMP: { /* normal sequence */ + break; + case SJ_SETJMP: /* normal sequence */ /* Avoid problems if early cleanup() */ fd_map = -1; ehdr_curr = NULL; mmap_failed = 1; do_file(argv[0]); - } break; - case SJ_FAIL: { /* error in do_file or below */ + break; + case SJ_FAIL: /* error in do_file or below */ ++n_error; - } break; - case SJ_SUCCEED: { /* premature success */ + break; + case SJ_SUCCEED: /* premature success */ /* do nothing */ - } break; + break; } /* end switch */ } return !!n_error; -- cgit v1.2.3 From 9f087e7612115b7a201d4f3392a95ac7408948ab Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 6 Apr 2011 14:10:22 -0400 Subject: ftrace: Add .kprobe.text section to whitelist for recordmcount.c The .kprobe.text section is safe to modify mcount to nop and tracing. Add it to the whitelist in recordmcount.c and recordmcount.pl. 
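A minimal sketch of the check being extended (illustrative name and trimmed table; the real list lives in is_mcounted_section_name() in scripts/recordmcount.c and in the %text_sections hash in recordmcount.pl):

#include <string.h>

/* Return non-zero if mcount calls made from this section may be
 * recorded by ftrace.  The table is trimmed to a few entries for
 * illustration; this patch adds ".kprobes.text" to the real one. */
static int is_whitelisted_section(const char *txtname)
{
	return strcmp(txtname, ".text") == 0 ||
	       strcmp(txtname, ".sched.text") == 0 ||
	       strcmp(txtname, ".kprobes.text") == 0 ||
	       strcmp(txtname, ".text.unlikely") == 0;
}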
Cc: John Reiser Cc: Masami Hiramatsu Link: http://lkml.kernel.org/r/20110421023737.743350547@goodmis.org Signed-off-by: Steven Rostedt --- scripts/recordmcount.c | 1 + scripts/recordmcount.pl | 1 + 2 files changed, 2 insertions(+) diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 4ebd8399cb3..37c59654c13 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -211,6 +211,7 @@ is_mcounted_section_name(char const *const txtname) strcmp(".sched.text", txtname) == 0 || strcmp(".spinlock.text", txtname) == 0 || strcmp(".irqentry.text", txtname) == 0 || + strcmp(".kprobes.text", txtname) == 0 || strcmp(".text.unlikely", txtname) == 0; } diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 4be0deea71c..a871cd41405 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -134,6 +134,7 @@ my %text_sections = ( ".sched.text" => 1, ".spinlock.text" => 1, ".irqentry.text" => 1, + ".kprobes.text" => 1, ".text.unlikely" => 1, ); -- cgit v1.2.3 From 8abd5724a7f1631ab2276954156c629d4d17149a Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 13 Apr 2011 13:31:08 -0400 Subject: ftrace/recordmcount: Modify only executable sections PROGBITS is not enough to determine if the section should be modified or not. Only process sections that are marked as executable. Cc: John Reiser Link: http://lkml.kernel.org/r/20110421023737.991485123@goodmis.org Signed-off-by: Steven Rostedt --- scripts/recordmcount.h | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index ac7b3303cb3..7f8d5c4c780 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -360,6 +360,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */ succeed_file(); } if (w(txthdr->sh_type) != SHT_PROGBITS || + !(w(txthdr->sh_flags) & SHF_EXECINSTR) || !is_mcounted_section_name(txtname)) return NULL; return txtname; -- cgit v1.2.3 From ffd618fa39284f8cc343894b566dd42ec6e74e77 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 8 Apr 2011 03:58:48 -0400 Subject: ftrace/recordmcount: Make ignored mcount calls into nops at compile time There are sections that are ignored by ftrace for function tracing because the text is in a section that can be removed without notice. The mcount calls in these sections are ignored and ftrace never sees them. The downside of this is that the functions in these sections still call mcount. Although the mcount function is defined in assembly simply as a return, this added overhead is unnecessary. The solution is to convert these callers into nops at compile time. A better solution is to add 'notrace' to the section markers, but as new sections come up all the time, it would be nice if they were dealt with when they are created. Later patches will deal with finding these sections and applying the proper solution. Thanks to H. Peter Anvin for giving me the right nops to use for x86. Cc: "H.
Peter Anvin" Cc: John Reiser Link: http://lkml.kernel.org/r/20110421023738.237101176@goodmis.org Signed-off-by: Steven Rostedt --- scripts/recordmcount.c | 40 ++++++++++++++++++++++-- scripts/recordmcount.h | 82 +++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 116 insertions(+), 6 deletions(-) diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 37c59654c13..78054a41d13 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -118,6 +118,34 @@ umalloc(size_t size) return addr; } +static unsigned char ideal_nop5_x86_64[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 }; +static unsigned char ideal_nop5_x86_32[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 }; +static unsigned char *ideal_nop; + +static char rel_type_nop; + +static int (*make_nop)(void *map, size_t const offset); + +static int make_nop_x86(void *map, size_t const offset) +{ + uint32_t *ptr; + unsigned char *op; + + /* Confirm we have 0xe8 0x0 0x0 0x0 0x0 */ + ptr = map + offset; + if (*ptr != 0) + return -1; + + op = map + offset - 1; + if (*op != 0xe8) + return -1; + + /* convert to nop */ + ulseek(fd_map, offset - 1, SEEK_SET); + uwrite(fd_map, ideal_nop, 5); + return 0; +} + /* * Get the whole file as a programming convenience in order to avoid * malloc+lseek+read+free of many pieces. If successful, then mmap @@ -301,7 +329,11 @@ do_file(char const *const fname) w2(ehdr->e_machine), fname); fail_file(); break; - case EM_386: reltype = R_386_32; break; + case EM_386: + reltype = R_386_32; + make_nop = make_nop_x86; + ideal_nop = ideal_nop5_x86_32; + break; case EM_ARM: reltype = R_ARM_ABS32; altmcount = "__gnu_mcount_nc"; break; @@ -312,7 +344,11 @@ do_file(char const *const fname) case EM_S390: /* reltype: e_class */ gpfx = '_'; break; case EM_SH: reltype = R_SH_DIR32; break; case EM_SPARCV9: reltype = R_SPARC_64; gpfx = '_'; break; - case EM_X86_64: reltype = R_X86_64_64; break; + case EM_X86_64: + make_nop = make_nop_x86; + ideal_nop = ideal_nop5_x86_64; + reltype = R_X86_64_64; + break; } /* end switch */ switch (ehdr->e_ident[EI_CLASS]) { diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index 7f8d5c4c780..657dbedd1c7 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -23,6 +23,7 @@ #undef fn_is_fake_mcount #undef MIPS_is_fake_mcount #undef sift_rel_mcount +#undef nop_mcount #undef find_secsym_ndx #undef __has_rel_mcount #undef has_rel_mcount @@ -49,6 +50,7 @@ #ifdef RECORD_MCOUNT_64 # define append_func append64 # define sift_rel_mcount sift64_rel_mcount +# define nop_mcount nop_mcount_64 # define find_secsym_ndx find64_secsym_ndx # define __has_rel_mcount __has64_rel_mcount # define has_rel_mcount has64_rel_mcount @@ -77,6 +79,7 @@ #else # define append_func append32 # define sift_rel_mcount sift32_rel_mcount +# define nop_mcount nop_mcount_32 # define find_secsym_ndx find32_secsym_ndx # define __has_rel_mcount __has32_rel_mcount # define has_rel_mcount has32_rel_mcount @@ -304,6 +307,70 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, return mlocp; } +/* + * Read the relocation table again, but this time its called on sections + * that are not going to be traced. The mcount calls here will be converted + * into nops. 
+ */ +static void nop_mcount(Elf_Shdr const *const relhdr, + Elf_Ehdr const *const ehdr) +{ + Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + + (void *)ehdr); + unsigned const symsec_sh_link = w(relhdr->sh_link); + Elf_Shdr const *const symsec = &shdr0[symsec_sh_link]; + Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symsec->sh_offset) + + (void *)ehdr); + + Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)]; + char const *const str0 = (char const *)(_w(strsec->sh_offset) + + (void *)ehdr); + + Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset) + + (void *)ehdr); + unsigned rel_entsize = _w(relhdr->sh_entsize); + unsigned const nrel = _w(relhdr->sh_size) / rel_entsize; + Elf_Rel const *relp = rel0; + + Elf_Shdr const *const shdr = &shdr0[w(relhdr->sh_info)]; + + unsigned mcountsym = 0; + unsigned t; + + for (t = nrel; t; --t) { + int ret = -1; + + if (!mcountsym) { + Elf_Sym const *const symp = + &sym0[Elf_r_sym(relp)]; + char const *symname = &str0[w(symp->st_name)]; + char const *mcount = gpfx == '_' ? "_mcount" : "mcount"; + + if (symname[0] == '.') + ++symname; /* ppc64 hack */ + if (strcmp(mcount, symname) == 0 || + (altmcount && strcmp(altmcount, symname) == 0)) + mcountsym = Elf_r_sym(relp); + } + + if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) + ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset); + + /* + * If we successfully removed the mcount, mark the relocation + * as a nop (don't do anything with it). + */ + if (!ret) { + Elf_Rel rel; + rel = *(Elf_Rel *)relp; + Elf_r_info(&rel, Elf_r_sym(relp), rel_type_nop); + ulseek(fd_map, (void *)relp - (void *)ehdr, SEEK_SET); + uwrite(fd_map, &rel, sizeof(rel)); + } + relp = (Elf_Rel const *)(rel_entsize + (void *)relp); + } +} + /* * Find a symbol in the given section, to be used as the base for relocating @@ -360,8 +427,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */ succeed_file(); } if (w(txthdr->sh_type) != SHT_PROGBITS || - !(w(txthdr->sh_flags) & SHF_EXECINSTR) || - !is_mcounted_section_name(txtname)) + !(w(txthdr->sh_flags) & SHF_EXECINSTR)) return NULL; return txtname; } @@ -384,9 +450,11 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, { unsigned totrelsz = 0; Elf_Shdr const *shdrp = shdr0; + char const *txtname; for (; nhdr; --nhdr, ++shdrp) { - if (has_rel_mcount(shdrp, shdr0, shstrtab, fname)) + txtname = has_rel_mcount(shdrp, shdr0, shstrtab, fname); + if (txtname && is_mcounted_section_name(txtname)) totrelsz += _w(shdrp->sh_size); } return totrelsz; @@ -422,7 +490,7 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { char const *const txtname = has_rel_mcount(relhdr, shdr0, shstrtab, fname); - if (txtname) { + if (txtname && is_mcounted_section_name(txtname)) { uint_t recval = 0; unsigned const recsym = find_secsym_ndx( w(relhdr->sh_info), txtname, &recval, @@ -433,6 +501,12 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) mlocp = sift_rel_mcount(mlocp, (void *)mlocp - (void *)mloc0, &mrelp, relhdr, ehdr, recsym, recval, reltype); + } else if (make_nop && txtname) { + /* + * This section is ignored by ftrace, but still + * has mcount calls. Convert them to nops now. 
+ */ + nop_mcount(relhdr, ehdr); + } } if (mloc0 != mlocp) { -- cgit v1.2.3 From dfad3d598c4bbbaf137588e22bac1ce624529f7e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 12 Apr 2011 18:53:25 -0400 Subject: ftrace/recordmcount: Add warning logic to warn on mcount not recorded There are some sections that should not have mcount recorded and should not have modifications made to that code. But currently they waste some time by calling mcount anyway (which simply returns). The real answer should be to either whitelist the section or have gcc ignore it fully. This change adds an option to recordmcount to warn when it finds a section that is ignored by ftrace but still contains mcount callers. This is not on by default as developers may not know if the section should be completely ignored or added to the whitelist. Cc: John Reiser Link: http://lkml.kernel.org/r/20110421023738.476989377@goodmis.org Signed-off-by: Steven Rostedt --- scripts/recordmcount.c | 32 ++++++++++++++++++++++++-------- scripts/recordmcount.h | 22 +++++++++++++++++----- 2 files changed, 41 insertions(+), 13 deletions(-) diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 78054a41d13..0e18975824f 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -39,6 +40,7 @@ static char gpfx; /* prefix for global symbol name (sometimes '_') */ static struct stat sb; /* Remember .st_size, etc. */ static jmp_buf jmpenv; /* setjmp/longjmp per-file error escape */ static const char *altmcount; /* alternate mcount symbol name */ +static int warn_on_notrace_sect; /* warn when section has mcount not being recorded */ /* setjmp() return values */ enum { @@ -397,19 +399,33 @@ do_file(char const *const fname) } int -main(int argc, char const *argv[]) +main(int argc, char *argv[]) { const char ftrace[] = "/ftrace.o"; int ftrace_size = sizeof(ftrace) - 1; int n_error = 0; /* gcc-4.3.0 false positive complaint */ + int c; + int i; - if (argc <= 1) { - fprintf(stderr, "usage: recordmcount file.o...\n"); + while ((c = getopt(argc, argv, "w")) >= 0) { + switch (c) { + case 'w': + warn_on_notrace_sect = 1; + break; + default: + fprintf(stderr, "usage: recordmcount [-w] file.o...\n"); + return 0; + } + } + + if ((argc - optind) < 1) { + fprintf(stderr, "usage: recordmcount [-w] file.o...\n"); return 0; } /* Process each file in turn, allowing deep failure. */ - for (--argc, ++argv; argc > 0; --argc, ++argv) { + for (i = optind; i < argc; i++) { + char *file = argv[i]; int const sjval = setjmp(jmpenv); int len; @@ -418,14 +434,14 @@ main(int argc, char const *argv[]) * function but does not call it. Since ftrace.o should * not be traced anyway, we just skip it.
*/ - len = strlen(argv[0]); + len = strlen(file); if (len >= ftrace_size && - strcmp(argv[0] + (len - ftrace_size), ftrace) == 0) + strcmp(file + (len - ftrace_size), ftrace) == 0) continue; switch (sjval) { default: - fprintf(stderr, "internal error: %s\n", argv[0]); + fprintf(stderr, "internal error: %s\n", file); exit(1); break; case SJ_SETJMP: /* normal sequence */ @@ -433,7 +449,7 @@ main(int argc, char const *argv[]) fd_map = -1; ehdr_curr = NULL; mmap_failed = 1; - do_file(argv[0]); + do_file(file); break; case SJ_FAIL: /* error in do_file or below */ ++n_error; diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index 657dbedd1c7..22033d563bc 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -313,7 +313,8 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, * into nops. */ static void nop_mcount(Elf_Shdr const *const relhdr, - Elf_Ehdr const *const ehdr) + Elf_Ehdr const *const ehdr, + const char *const txtname) { Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + (void *)ehdr); @@ -336,6 +337,7 @@ static void nop_mcount(Elf_Shdr const *const relhdr, unsigned mcountsym = 0; unsigned t; + int once = 0; for (t = nrel; t; --t) { int ret = -1; @@ -353,8 +355,18 @@ static void nop_mcount(Elf_Shdr const *const relhdr, mcountsym = Elf_r_sym(relp); } - if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) - ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset); + if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { + if (make_nop) + ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset); + if (warn_on_notrace_sect && !once) { + printf("Section %s has mcount callers being ignored\n", + txtname); + once = 1; + /* just warn? */ + if (!make_nop) + return; + } + } /* * If we successfully removed the mcount, mark the relocation @@ -501,12 +513,12 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) mlocp = sift_rel_mcount(mlocp, (void *)mlocp - (void *)mloc0, &mrelp, relhdr, ehdr, recsym, recval, reltype); - } else if (make_nop && txtname) { + } else if (txtname && (warn_on_notrace_sect || make_nop)) { /* * This section is ignored by ftrace, but still * has mcount calls. Convert them to nops now. */ - nop_mcount(relhdr, ehdr); + nop_mcount(relhdr, ehdr, txtname); } } if (mloc0 != mlocp) { -- cgit v1.2.3 From 85356f802225fedeee8c3e65bdd93b263ace0a8b Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 12 Apr 2011 18:59:10 -0400 Subject: kbuild/recordmcount: Add RECORDMCOUNT_WARN to warn about mcount callers When mcount is called in a section that ftrace will not modify it into a nop, we want to warn about this. But not warn about this always. Now if the user builds the kernel with the option RECORDMCOUNT_WARN=1 then the build will warn about mcount callers that are ignored and will just waste execution time. 
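A small sketch of the warn-once behaviour this option ultimately enables (illustrative helper; in the real code the check sits inline in nop_mcount() and fires at most once per section):

#include <stdio.h>

/* Set when recordmcount is invoked with -w, which the kbuild hook
 * in this patch wires up to RECORDMCOUNT_WARN=1. */
static int warn_on_notrace_sect;

/* Warn at most once per section that its mcount callers are ignored. */
static void warn_ignored_mcount(const char *txtname, int *once)
{
	if (warn_on_notrace_sect && !*once) {
		printf("Section %s has mcount callers being ignored\n",
		       txtname);
		*once = 1;
	}
}

With the Makefile.build change below, building with "make RECORDMCOUNT_WARN=1" passes -w to recordmcount, which is what sets the flag above.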
Acked-by: Michal Marek Cc: linux-kbuild@vger.kernel.org Link: http://lkml.kernel.org/r/20110421023738.714956282@goodmis.org Signed-off-by: Steven Rostedt --- Makefile | 1 + scripts/Makefile.build | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 41ea6fbec55..e7d01adaf69 100644 --- a/Makefile +++ b/Makefile @@ -1268,6 +1268,7 @@ help: @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' @echo ' make C=2 [targets] Force check of all c source with $$CHECK' @echo ' make W=1 [targets] Enable extra gcc checks' + @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' @echo '' @echo 'Execute "make" or "make all" to build all targets marked with [*] ' @echo 'For further info see the ./README file' diff --git a/scripts/Makefile.build b/scripts/Makefile.build index d5f925abe4d..fdca952f6a4 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -244,13 +244,16 @@ endif ifdef CONFIG_FTRACE_MCOUNT_RECORD ifdef BUILD_C_RECORDMCOUNT +ifeq ("$(origin RECORDMCOUNT_WARN)", "command line") + RECORDMCOUNT_FLAGS = -w +endif # Due to recursion, we must skip empty.o. # The empty.o file is created in the make process in order to determine # the target endianness and word size. It is made before all other C # files, including recordmcount. sub_cmd_record_mcount = \ if [ $(@) != "scripts/mod/empty.o" ]; then \ - $(objtree)/scripts/recordmcount "$(@)"; \ + $(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) "$(@)"; \ fi; else sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ -- cgit v1.2.3 From f0201738b61b1adcf6b2c4719c5c415745014c1c Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 12 Apr 2011 19:06:39 -0400 Subject: ftrace: Avoid recording mcount on .init sections directly The init and exit sections should not be traced and adding a call to mcount to them is a waste of text and instruction cache. Have the macro section attributes include notrace to ignore these functions for tracing from the build. 
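As a sketch of the effect (hypothetical macro names; in the kernel, notrace expands to __attribute__((no_instrument_function)), which keeps gcc from emitting the mcount call when building with -pg):

/* Illustrative only: a section marker that also suppresses mcount. */
#define notrace		__attribute__((no_instrument_function))
#define __my_exit	__attribute__((section(".exit.text"))) notrace

void __my_exit my_driver_exit(void)
{
	/* built with -pg, this function gets no mcount call */
}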
Link: http://lkml.kernel.org/r/20110421023738.953028219@goodmis.org Signed-off-by: Steven Rostedt --- include/linux/init.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/include/linux/init.h b/include/linux/init.h index 577671c5515..9146f39cddd 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -79,29 +79,29 @@ #define __exitused __used #endif -#define __exit __section(.exit.text) __exitused __cold +#define __exit __section(.exit.text) __exitused __cold notrace /* Used for HOTPLUG */ -#define __devinit __section(.devinit.text) __cold +#define __devinit __section(.devinit.text) __cold notrace #define __devinitdata __section(.devinit.data) #define __devinitconst __section(.devinit.rodata) -#define __devexit __section(.devexit.text) __exitused __cold +#define __devexit __section(.devexit.text) __exitused __cold notrace #define __devexitdata __section(.devexit.data) #define __devexitconst __section(.devexit.rodata) /* Used for HOTPLUG_CPU */ -#define __cpuinit __section(.cpuinit.text) __cold +#define __cpuinit __section(.cpuinit.text) __cold notrace #define __cpuinitdata __section(.cpuinit.data) #define __cpuinitconst __section(.cpuinit.rodata) -#define __cpuexit __section(.cpuexit.text) __exitused __cold +#define __cpuexit __section(.cpuexit.text) __exitused __cold notrace #define __cpuexitdata __section(.cpuexit.data) #define __cpuexitconst __section(.cpuexit.rodata) /* Used for MEMORY_HOTPLUG */ -#define __meminit __section(.meminit.text) __cold +#define __meminit __section(.meminit.text) __cold notrace #define __meminitdata __section(.meminit.data) #define __meminitconst __section(.meminit.rodata) -#define __memexit __section(.memexit.text) __exitused __cold +#define __memexit __section(.memexit.text) __exitused __cold notrace #define __memexitdata __section(.memexit.data) #define __memexitconst __section(.memexit.rodata) -- cgit v1.2.3 From 2895cd2ab81dfb7bc22637bc110857db44a30b4a Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 13 Apr 2011 16:43:29 -0400 Subject: ftrace/x86: Do not trace .discard.text section The section called .discard.text has tracing attached to it and is currently ignored by ftrace. But it does include a call to the mcount stub. Adding a notrace to the code keeps gcc from adding the useless mcount caller to it. Link: http://lkml.kernel.org/r/20110421023739.243651696@goodmis.org Signed-off-by: Steven Rostedt --- arch/x86/include/asm/setup.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index db8aa19a08a..647d8a06ce4 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -88,7 +88,7 @@ void *extend_brk(size_t size, size_t align); * executable.) */ #define RESERVE_BRK(name,sz) \ - static void __section(.discard.text) __used \ + static void __section(.discard.text) __used notrace \ __brk_reservation_fn_##name##__(void) { \ asm volatile ( \ ".pushsection .brk_reservation,\"aw\",@nobits;" \ -- cgit v1.2.3 From 37762cb9977626343b3cd1aab9146313c94748c2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Apr 2011 20:47:34 -0400 Subject: ftrace/recordmcount: Remove duplicate code to find mcount symbol The code in sift_rel_mcount() and nop_mcount() to get the mcount symbol number is identical. Replace the two locations with a call to a function that does the work. 
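A simplified sketch of the lookup being factored out (plain C types for illustration; the real helper, get_mcountsym(), works on the ELF symbol and relocation records and also accepts the alternate ARM mcount name):

#include <string.h>

/* Map a relocation's symbol name to its symbol index if it is the
 * mcount symbol, else return 0.  gpfx is the global-symbol prefix
 * ('_' on some architectures). */
static unsigned int mcount_symindex(const char *symname,
				    unsigned int symidx, char gpfx)
{
	const char *mcount = (gpfx == '_') ? "_mcount" : "mcount";

	if (symname[0] == '.')
		++symname;	/* ppc64 hack: skip the dot prefix */

	return strcmp(symname, mcount) == 0 ? symidx : 0;
}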
Cc: John Reiser Link: http://lkml.kernel.org/r/20110421023739.488093407@goodmis.org Signed-off-by: Steven Rostedt --- scripts/recordmcount.h | 51 ++++++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index 22033d563bc..deb6a517638 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -28,6 +28,7 @@ #undef __has_rel_mcount #undef has_rel_mcount #undef tot_relsize +#undef get_mcountsym #undef do_func #undef Elf_Addr #undef Elf_Ehdr @@ -56,6 +57,7 @@ # define has_rel_mcount has64_rel_mcount # define tot_relsize tot64_relsize # define do_func do64 +# define get_mcountsym get_mcountsym_64 # define is_fake_mcount is_fake_mcount64 # define fn_is_fake_mcount fn_is_fake_mcount64 # define MIPS_is_fake_mcount MIPS64_is_fake_mcount @@ -85,6 +87,7 @@ # define has_rel_mcount has32_rel_mcount # define tot_relsize tot32_relsize # define do_func do32 +# define get_mcountsym get_mcountsym_32 # define is_fake_mcount is_fake_mcount32 # define fn_is_fake_mcount fn_is_fake_mcount32 # define MIPS_is_fake_mcount MIPS32_is_fake_mcount @@ -237,6 +240,26 @@ static void append_func(Elf_Ehdr *const ehdr, uwrite(fd_map, ehdr, sizeof(*ehdr)); } +static unsigned get_mcountsym(Elf_Sym const *const sym0, + Elf_Rel const *relp, + char const *const str0) +{ + unsigned mcountsym = 0; + + Elf_Sym const *const symp = + &sym0[Elf_r_sym(relp)]; + char const *symname = &str0[w(symp->st_name)]; + char const *mcount = gpfx == '_' ? "_mcount" : "mcount"; + + if (symname[0] == '.') + ++symname; /* ppc64 hack */ + if (strcmp(mcount, symname) == 0 || + (altmcount && strcmp(altmcount, symname) == 0)) + mcountsym = Elf_r_sym(relp); + + return mcountsym; +} + /* * Look at the relocations in order to find the calls to mcount. * Accumulate the section offsets that are found, and their relocation info, @@ -274,18 +297,8 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, unsigned t; for (t = nrel; t; --t) { - if (!mcountsym) { - Elf_Sym const *const symp = - &sym0[Elf_r_sym(relp)]; - char const *symname = &str0[w(symp->st_name)]; - char const *mcount = gpfx == '_' ? "_mcount" : "mcount"; - - if (symname[0] == '.') - ++symname; /* ppc64 hack */ - if (strcmp(mcount, symname) == 0 || - (altmcount && strcmp(altmcount, symname) == 0)) - mcountsym = Elf_r_sym(relp); - } + if (!mcountsym) + mcountsym = get_mcountsym(sym0, relp, str0); if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { uint_t const addend = _w(_w(relp->r_offset) - recval); @@ -342,18 +355,8 @@ static void nop_mcount(Elf_Shdr const *const relhdr, for (t = nrel; t; --t) { int ret = -1; - if (!mcountsym) { - Elf_Sym const *const symp = - &sym0[Elf_r_sym(relp)]; - char const *symname = &str0[w(symp->st_name)]; - char const *mcount = gpfx == '_' ? "_mcount" : "mcount"; - - if (symname[0] == '.') - ++symname; /* ppc64 hack */ - if (strcmp(mcount, symname) == 0 || - (altmcount && strcmp(altmcount, symname) == 0)) - mcountsym = Elf_r_sym(relp); - } + if (!mcountsym) + mcountsym = get_mcountsym(sym0, relp, str0); if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { if (make_nop) -- cgit v1.2.3 From 41b402a201a12efdff4acc990e023a89a409cd41 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 20 Apr 2011 21:13:06 -0400 Subject: ftrace/recordmcount: Add helper function get_sym_str_and_relp() The code to get the symbol, string, and relp pointers in the two functions sift_rel_mcount() and nop_mcount() are identical and also non-trivial. 
Moving this duplicate code into a single helper function makes the code easier to read and more maintainable. Cc: John Reiser Link: http://lkml.kernel.org/r/20110421023739.723658553@goodmis.org Signed-off-by: Steven Rostedt --- scripts/recordmcount.h | 67 ++++++++++++++++++++++++++++---------------------- 1 file changed, 37 insertions(+), 30 deletions(-) diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index deb6a517638..3c00faba745 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -29,6 +29,7 @@ #undef has_rel_mcount #undef tot_relsize #undef get_mcountsym +#undef get_sym_str_and_relp #undef do_func #undef Elf_Addr #undef Elf_Ehdr @@ -56,6 +57,7 @@ # define __has_rel_mcount __has64_rel_mcount # define has_rel_mcount has64_rel_mcount # define tot_relsize tot64_relsize +# define get_sym_str_and_relp get_sym_str_and_relp_64 # define do_func do64 # define get_mcountsym get_mcountsym_64 # define is_fake_mcount is_fake_mcount64 @@ -86,6 +88,7 @@ # define __has_rel_mcount __has32_rel_mcount # define has_rel_mcount has32_rel_mcount # define tot_relsize tot32_relsize +# define get_sym_str_and_relp get_sym_str_and_relp_32 # define do_func do32 # define get_mcountsym get_mcountsym_32 # define is_fake_mcount is_fake_mcount32 @@ -260,6 +263,29 @@ static unsigned get_mcountsym(Elf_Sym const *const sym0, return mcountsym; } +static void get_sym_str_and_relp(Elf_Shdr const *const relhdr, + Elf_Ehdr const *const ehdr, + Elf_Sym const **sym0, + char const **str0, + Elf_Rel const **relp) +{ + Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + + (void *)ehdr); + unsigned const symsec_sh_link = w(relhdr->sh_link); + Elf_Shdr const *const symsec = &shdr0[symsec_sh_link]; + Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)]; + Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset) + + (void *)ehdr); + + *sym0 = (Elf_Sym const *)(_w(symsec->sh_offset) + + (void *)ehdr); + + *str0 = (char const *)(_w(strsec->sh_offset) + + (void *)ehdr); + + *relp = rel0; +} + /* * Look at the relocations in order to find the calls to mcount. 
* Accumulate the section offsets that are found, and their relocation info, @@ -276,26 +302,16 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, { uint_t *const mloc0 = mlocp; Elf_Rel *mrelp = *mrelpp; - Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) - + (void *)ehdr); - unsigned const symsec_sh_link = w(relhdr->sh_link); - Elf_Shdr const *const symsec = &shdr0[symsec_sh_link]; - Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symsec->sh_offset) - + (void *)ehdr); - - Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)]; - char const *const str0 = (char const *)(_w(strsec->sh_offset) - + (void *)ehdr); - - Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset) - + (void *)ehdr); + Elf_Sym const *sym0; + char const *str0; + Elf_Rel const *relp; unsigned rel_entsize = _w(relhdr->sh_entsize); unsigned const nrel = _w(relhdr->sh_size) / rel_entsize; - Elf_Rel const *relp = rel0; - unsigned mcountsym = 0; unsigned t; + get_sym_str_and_relp(relhdr, ehdr, &sym0, &str0, &relp); + for (t = nrel; t; --t) { if (!mcountsym) mcountsym = get_mcountsym(sym0, relp, str0); @@ -331,27 +347,18 @@ static void nop_mcount(Elf_Shdr const *const relhdr, { Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + (void *)ehdr); - unsigned const symsec_sh_link = w(relhdr->sh_link); - Elf_Shdr const *const symsec = &shdr0[symsec_sh_link]; - Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symsec->sh_offset) - + (void *)ehdr); - - Elf_Shdr const *const strsec = &shdr0[w(symsec->sh_link)]; - char const *const str0 = (char const *)(_w(strsec->sh_offset) - + (void *)ehdr); - - Elf_Rel const *const rel0 = (Elf_Rel const *)(_w(relhdr->sh_offset) - + (void *)ehdr); + Elf_Sym const *sym0; + char const *str0; + Elf_Rel const *relp; + Elf_Shdr const *const shdr = &shdr0[w(relhdr->sh_info)]; unsigned rel_entsize = _w(relhdr->sh_entsize); unsigned const nrel = _w(relhdr->sh_size) / rel_entsize; - Elf_Rel const *relp = rel0; - - Elf_Shdr const *const shdr = &shdr0[w(relhdr->sh_info)]; - unsigned mcountsym = 0; unsigned t; int once = 0; + get_sym_str_and_relp(relhdr, ehdr, &sym0, &str0, &relp); + for (t = nrel; t; --t) { int ret = -1; -- cgit v1.2.3 From 07d8b595f367f4604e6027ad4cba33cbe3f55e10 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 10 May 2011 10:10:40 +0200 Subject: ftrace/recordmcount: mcount address adjustment Introduce mcount_adjust{,_32,_64} to the C implementation of recordmcount analog to $mcount_adjust in the perl script. The adjustment is added to the address of the relocations against the mcount symbol. If this adjustment is done by recordmcount at compile time the ftrace_call_adjust function can be turned into a nop. 
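To make the arithmetic concrete, a tiny stand-alone example (the offset value is invented; the -1 matches the x86 adjustment added later in this series): the relocation points at the call's displacement field, so adding the per-arch adjustment yields the address of the call instruction itself, and ftrace_call_adjust() no longer needs to do this at boot time.

#include <stdio.h>

int main(void)
{
	/* On x86, "call mcount" is e8 <4-byte displacement>; the relocation
	 * covers the displacement, which starts one byte after the opcode. */
	unsigned long r_offset      = 0x1001;	/* hypothetical reloc offset */
	long          mcount_adjust = -1;	/* x86 value used in this series */
	unsigned long call_site     = r_offset + mcount_adjust;

	printf("recorded mcount call site: %#lx\n", call_site);	/* 0x1000 */
	return 0;
}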
Cc: John Reiser Signed-off-by: Martin Schwidefsky Signed-off-by: Steven Rostedt --- scripts/recordmcount.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index 3c00faba745..4be60364a40 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -22,6 +22,7 @@ #undef is_fake_mcount #undef fn_is_fake_mcount #undef MIPS_is_fake_mcount +#undef mcount_adjust #undef sift_rel_mcount #undef nop_mcount #undef find_secsym_ndx @@ -63,6 +64,7 @@ # define is_fake_mcount is_fake_mcount64 # define fn_is_fake_mcount fn_is_fake_mcount64 # define MIPS_is_fake_mcount MIPS64_is_fake_mcount +# define mcount_adjust mcount_adjust_64 # define Elf_Addr Elf64_Addr # define Elf_Ehdr Elf64_Ehdr # define Elf_Shdr Elf64_Shdr @@ -94,6 +96,7 @@ # define is_fake_mcount is_fake_mcount32 # define fn_is_fake_mcount fn_is_fake_mcount32 # define MIPS_is_fake_mcount MIPS32_is_fake_mcount +# define mcount_adjust mcount_adjust_32 # define Elf_Addr Elf32_Addr # define Elf_Ehdr Elf32_Ehdr # define Elf_Shdr Elf32_Shdr @@ -132,6 +135,8 @@ static void fn_ELF_R_INFO(Elf_Rel *const rp, unsigned sym, unsigned type) } static void (*Elf_r_info)(Elf_Rel *const rp, unsigned sym, unsigned type) = fn_ELF_R_INFO; +static int mcount_adjust = 0; + /* * MIPS mcount long call has 2 _mcount symbols, only the position of the 1st * _mcount symbol is needed for dynamic function tracer, with it, to disable @@ -317,8 +322,8 @@ static uint_t *sift_rel_mcount(uint_t *mlocp, mcountsym = get_mcountsym(sym0, relp, str0); if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { - uint_t const addend = _w(_w(relp->r_offset) - recval); - + uint_t const addend = + _w(_w(relp->r_offset) - recval + mcount_adjust); mrelp->r_offset = _w(offbase + ((void *)mlocp - (void *)mloc0)); Elf_r_info(mrelp, recsym, reltype); -- cgit v1.2.3 From 521ccb5c4aece609311bfa7157910a8f0c942af5 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 10 May 2011 10:10:41 +0200 Subject: ftrace/x86: mcount offset calculation Do the mcount offset adjustment in the recordmcount.pl/recordmcount.[ch] at compile time and not in ftrace_call_adjust at run time. Signed-off-by: Martin Schwidefsky Signed-off-by: Steven Rostedt --- arch/x86/include/asm/ftrace.h | 7 +++---- scripts/recordmcount.c | 2 ++ scripts/recordmcount.pl | 2 ++ 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index db24c2278be..268c783ab1c 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -38,11 +38,10 @@ extern void mcount(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { /* - * call mcount is "e8 <4 byte offset>" - * The addr points to the 4 byte offset and the caller of this - * function wants the pointer to e8. Simply subtract one. + * addr is the address of the mcount call instruction. + * recordmcount does the necessary offset calculation. 
*/ - return addr - 1; + return addr; } #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 0e18975824f..7648a5d1115 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -335,6 +335,7 @@ do_file(char const *const fname) reltype = R_386_32; make_nop = make_nop_x86; ideal_nop = ideal_nop5_x86_32; + mcount_adjust_32 = -1; break; case EM_ARM: reltype = R_ARM_ABS32; altmcount = "__gnu_mcount_nc"; @@ -350,6 +351,7 @@ do_file(char const *const fname) make_nop = make_nop_x86; ideal_nop = ideal_nop5_x86_64; reltype = R_X86_64_64; + mcount_adjust_64 = -1; break; } /* end switch */ diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index a871cd41405..414e7f5e42e 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -223,6 +223,7 @@ if ($arch eq "x86_64") { $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; $type = ".quad"; $alignment = 8; + $mcount_adjust = -1; # force flags for this arch $ld .= " -m elf_x86_64"; @@ -232,6 +233,7 @@ if ($arch eq "x86_64") { } elsif ($arch eq "i386") { $alignment = 4; + $mcount_adjust = -1; # force flags for this arch $ld .= " -m elf_i386"; -- cgit v1.2.3 From f29638868280534ed7e2fdd93b31557232597940 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 10 May 2011 10:10:43 +0200 Subject: ftrace/s390: mcount offset calculation Do the mcount offset adjustment in the recordmcount.pl/recordmcount.[ch] at compile time and not in ftrace_call_adjust at run time. Signed-off-by: Martin Schwidefsky Signed-off-by: Steven Rostedt --- arch/s390/include/asm/ftrace.h | 4 +--- scripts/recordmcount.c | 8 ++++++-- scripts/recordmcount.pl | 2 ++ 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 3c29be4836e..b7931faaef6 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -11,15 +11,13 @@ struct dyn_arch_ftrace { }; #ifdef CONFIG_64BIT #define MCOUNT_INSN_SIZE 12 -#define MCOUNT_OFFSET 8 #else #define MCOUNT_INSN_SIZE 20 -#define MCOUNT_OFFSET 4 #endif static inline unsigned long ftrace_call_adjust(unsigned long addr) { - return addr - MCOUNT_OFFSET; + return addr; } #endif /* __ASSEMBLY__ */ diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 7648a5d1115..ee52cb8e17a 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -368,8 +368,10 @@ do_file(char const *const fname) "unrecognized ET_REL file: %s\n", fname); fail_file(); } - if (w2(ehdr->e_machine) == EM_S390) + if (w2(ehdr->e_machine) == EM_S390) { reltype = R_390_32; + mcount_adjust_32 = -4; + } if (w2(ehdr->e_machine) == EM_MIPS) { reltype = R_MIPS_32; is_fake_mcount32 = MIPS32_is_fake_mcount; @@ -384,8 +386,10 @@ do_file(char const *const fname) "unrecognized ET_REL file: %s\n", fname); fail_file(); } - if (w2(ghdr->e_machine) == EM_S390) + if (w2(ghdr->e_machine) == EM_S390) { reltype = R_390_64; + mcount_adjust_64 = -8; + } if (w2(ghdr->e_machine) == EM_MIPS) { reltype = R_MIPS_64; Elf64_r_sym = MIPS64_r_sym; diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 414e7f5e42e..858966ab019 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -243,12 +243,14 @@ if ($arch eq "x86_64") { } elsif ($arch eq "s390" && $bits == 32) { $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$"; + $mcount_adjust = -4; $alignment = 4; $ld .= " -m elf_s390"; $cc .= " -m31"; } elsif ($arch eq "s390" && $bits == 64) { $mcount_regex = 
"^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; + $mcount_adjust = -8; $alignment = 8; $type = ".quad"; $ld .= " -m elf64_s390"; -- cgit v1.2.3 From 94692349c4fc1bc74c19a28f9379509361a06a3b Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 17 May 2011 15:36:19 +0200 Subject: perf: Fix multi-event parsing bug This patch fixes an issue with event parsing. The following commit appears to have broken the ability to specify a comma separated list of events: commit ceb53fbf6dbb1df26d38379a262c6981fe73dd36 Author: Ingo Molnar Date: Wed Apr 27 04:06:33 2011 +0200 perf stat: Fail more clearly when an invalid modifier is specified This patch fixes this while preserving the desired effect: $ perf stat -e instructions:u,instructions:k ls /dev/null /dev/null Performance counter stats for 'ls /dev/null': 365956 instructions:u # 0.00 insns per cycle 731806 instructions:k # 0.00 insns per cycle 0.001108862 seconds time elapsed $ perf stat -e task-clock-msecs true invalid event modifier: '-msecs' Run 'perf list' for a list of valid events and modifiers Signed-off-by: Stephane Eranian Cc: acme@redhat.com Cc: peterz@infradead.org Cc: fweisbec@gmail.com Link: http://lkml.kernel.org/r/20110517133619.GA6999@quad Signed-off-by: Ingo Molnar --- tools/perf/util/parse-events.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index ffa493a2433..41982c373fa 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -734,6 +734,9 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr) if (!*str) return 0; + if (*str == ',') + return 0; + if (*str++ != ':') return -1; -- cgit v1.2.3 From 2494b030ba9334c7dd7df9b9f7abe4eacc950ec5 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 17 May 2011 12:33:26 -0700 Subject: x86, cpufeature: Fix cpuid leaf 7 feature detection CPUID leaf 7, subleaf 0 returns the maximum subleaf in EAX, not the number of subleaves. Since so far only subleaf 0 is defined (and only the EBX bitfield) we do not need to qualify the test. Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1305660806-17519-1-git-send-email-fenghua.yu@intel.com Signed-off-by: H. Peter Anvin Cc: 2.6.36..39 --- arch/x86/kernel/cpu/common.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index e2ced0074a4..173f3a3fa1a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -565,8 +565,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); - if (eax > 0) - c->x86_capability[9] = ebx; + c->x86_capability[9] = ebx; } /* AMD-defined flags: level 0x80000001 */ -- cgit v1.2.3 From 724a92ee45c04cb9d82884a856b03b1e594d9de1 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 17 May 2011 15:29:10 -0700 Subject: x86, cpufeature: Add CPU feature bit for enhanced REP MOVSB/STOSB Intel processors are adding enhancements to REP MOVSB/STOSB and the use of REP MOVSB/STOSB for optimal memcpy/memset or similar functions is recommended. Enhancement availability is indicated by CPUID.7.0.EBX[9] (Enhanced REP MOVSB/ STOSB). Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1305671358-14478-2-git-send-email-fenghua.yu@intel.com Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/cpufeature.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 91f3e087cf2..7f2f7b12329 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -195,6 +195,7 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ +#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ #if defined(__KERNEL__) && !defined(__ASSEMBLY__) -- cgit v1.2.3 From 161ec53c702ce9df2f439804dfb9331807066daa Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 17 May 2011 15:29:11 -0700 Subject: x86, mem, intel: Initialize Enhanced REP MOVSB/STOSB If kernel intends to use enhanced REP MOVSB/STOSB, it must ensure IA32_MISC_ENABLE.Fast_String_Enable (bit 0) is set and CPUID.(EAX=07H, ECX=0H): EBX[bit 9] also reports 1. Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1305671358-14478-3-git-send-email-fenghua.yu@intel.com Signed-off-by: H. Peter Anvin --- arch/x86/kernel/cpu/intel.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index df86bc8c859..fc73a34ba8c 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -29,10 +29,10 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) { + u64 misc_enable; + /* Unmask CPUID levels if masked: */ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { - u64 misc_enable; - rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) { @@ -118,8 +118,6 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) * (model 2) with the same problem. */ if (c->x86 == 15) { - u64 misc_enable; - rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { @@ -130,6 +128,19 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) } } #endif + + /* + * If fast string is not enabled in IA32_MISC_ENABLE for any reason, + * clear the fast string and enhanced fast string CPU capabilities. + */ + if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { + rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { + printk(KERN_INFO "Disabled fast string operations\n"); + setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); + setup_clear_cpu_cap(X86_FEATURE_ERMS); + } + } } #ifdef CONFIG_X86_32 -- cgit v1.2.3 From 509731336313b3799cf03071d72c64fa6383895e Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 17 May 2011 15:29:12 -0700 Subject: x86, alternative, doc: Add comment for applying alternatives order Some string operation functions may be patched twice, e.g. on enhanced REP MOVSB /STOSB processors, memcpy is patched first by fast string alternative function, then it is patched by enhanced REP MOVSB/STOSB alternative function. Add comment for applying alternatives order to warn people who may change the applying alternatives order for any reason. [ Documentation-only patch ] Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1305671358-14478-4-git-send-email-fenghua.yu@intel.com Signed-off-by: H. 
Peter Anvin --- arch/x86/kernel/alternative.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 4a234677e21..f4fe15ddcf9 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -210,6 +210,15 @@ void __init_or_module apply_alternatives(struct alt_instr *start, u8 insnbuf[MAX_PATCH_LEN]; DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); + /* + * The scan order should be from start to end. A later scanned + * alternative code can overwrite a previous scanned alternative code. + * Some kernel functions (e.g. memcpy, memset, etc) use this order to + * patch code. + * + * So be careful if you want to change the scan order to any other + * order. + */ for (a = start; a < end; a++) { u8 *instr = a->instr; BUG_ON(a->replacementlen > a->instrlen); -- cgit v1.2.3 From 9072d11da15a71e086eab3b5085184f2c1d06913 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 17 May 2011 15:29:13 -0700 Subject: x86, alternative: Add altinstruction_entry macro Add altinstruction_entry macro to generate .altinstructions section entries from assembly code. This should be less failure-prone than open-coding. Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1305671358-14478-5-git-send-email-fenghua.yu@intel.com Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/alternative-asm.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index a63a68be1cc..94d420b360d 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h @@ -15,4 +15,13 @@ .endm #endif +.macro altinstruction_entry orig alt feature orig_len alt_len + .align 8 + .quad \orig + .quad \alt + .word \feature + .byte \orig_len + .byte \alt_len +.endm + #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 From e365c9df2f2f001450decf9512412d2d5bd1cdef Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 17 May 2011 15:29:14 -0700 Subject: x86, mem: clear_page_64.S: Support clear_page() with enhanced REP MOVSB/STOSB Intel processors are adding enhancements to REP MOVSB/STOSB and the use of REP MOVSB/STOSB for optimal memcpy/memset or similar functions is recommended. Enhancement availability is indicated by CPUID.7.0.EBX[9] (Enhanced REP MOVSB/ STOSB). Support clear_page() with rep stosb for processor supporting enhanced REP MOVSB /STOSB. On processors supporting enhanced REP MOVSB/STOSB, the alternative clear_page_c_e function using enhanced REP STOSB overrides the original function and the fast string function. Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1305671358-14478-6-git-send-email-fenghua.yu@intel.com Signed-off-by: H. Peter Anvin --- arch/x86/lib/clear_page_64.S | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S index aa4326bfb24..f2145cfa12a 100644 --- a/arch/x86/lib/clear_page_64.S +++ b/arch/x86/lib/clear_page_64.S @@ -1,5 +1,6 @@ #include #include +#include /* * Zero a page. @@ -14,6 +15,15 @@ ENTRY(clear_page_c) CFI_ENDPROC ENDPROC(clear_page_c) +ENTRY(clear_page_c_e) + CFI_STARTPROC + movl $4096,%ecx + xorl %eax,%eax + rep stosb + ret + CFI_ENDPROC +ENDPROC(clear_page_c_e) + ENTRY(clear_page) CFI_STARTPROC xorl %eax,%eax @@ -38,21 +48,26 @@ ENTRY(clear_page) .Lclear_page_end: ENDPROC(clear_page) - /* Some CPUs run faster using the string instructions. - It is also a lot simpler. 
Use this when possible */ + /* + * Some CPUs support enhanced REP MOVSB/STOSB instructions. + * It is recommended to use this when possible. + * If enhanced REP MOVSB/STOSB is not available, try to use fast string. + * Otherwise, use original function. + * + */ #include .section .altinstr_replacement,"ax" 1: .byte 0xeb /* jmp */ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ -2: +2: .byte 0xeb /* jmp */ + .byte (clear_page_c_e - clear_page) - (3f - 2b) /* offset */ +3: .previous .section .altinstructions,"a" - .align 8 - .quad clear_page - .quad 1b - .word X86_FEATURE_REP_GOOD - .byte .Lclear_page_end - clear_page - .byte 2b - 1b + altinstruction_entry clear_page,1b,X86_FEATURE_REP_GOOD,\ + .Lclear_page_end-clear_page, 2b-1b + altinstruction_entry clear_page,2b,X86_FEATURE_ERMS, \ + .Lclear_page_end-clear_page,3b-2b .previous -- cgit v1.2.3 From 4307bec9344aed83f8107c3eb4285bd9d218fc10 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 17 May 2011 15:29:15 -0700 Subject: x86, mem: copy_user_64.S: Support copy_to/from_user by enhanced REP MOVSB/STOSB Support copy_to_user/copy_from_user() by enhanced REP MOVSB/STOSB. On processors supporting enhanced REP MOVSB/STOSB, the alternative copy_user_enhanced_fast_string function using enhanced rep movsb overrides the original function and the fast string function. Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1305671358-14478-7-git-send-email-fenghua.yu@intel.com Signed-off-by: H. Peter Anvin --- arch/x86/lib/copy_user_64.S | 65 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 55 insertions(+), 10 deletions(-) diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 99e48261519..d17a1170bbf 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -15,23 +15,30 @@ #include #include #include +#include - .macro ALTERNATIVE_JUMP feature,orig,alt +/* + * By placing feature2 after feature1 in altinstructions section, we logically + * implement: + * If CPU has feature2, jmp to alt2 is used + * else if CPU has feature1, jmp to alt1 is used + * else jmp to orig is used. 
+ */ + .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2 0: .byte 0xe9 /* 32bit jump */ .long \orig-1f /* by default jump to orig */ 1: .section .altinstr_replacement,"ax" 2: .byte 0xe9 /* near jump with 32bit immediate */ - .long \alt-1b /* offset */ /* or alternatively to alt */ + .long \alt1-1b /* offset */ /* or alternatively to alt1 */ +3: .byte 0xe9 /* near jump with 32bit immediate */ + .long \alt2-1b /* offset */ /* or alternatively to alt2 */ .previous + .section .altinstructions,"a" - .align 8 - .quad 0b - .quad 2b - .word \feature /* when feature is set */ - .byte 5 - .byte 5 + altinstruction_entry 0b,2b,\feature1,5,5 + altinstruction_entry 0b,3b,\feature2,5,5 .previous .endm @@ -73,7 +80,9 @@ ENTRY(_copy_to_user) jc bad_to_user cmpq TI_addr_limit(%rax),%rcx jae bad_to_user - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string + ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ + copy_user_generic_unrolled,copy_user_generic_string, \ + copy_user_enhanced_fast_string CFI_ENDPROC ENDPROC(_copy_to_user) @@ -86,7 +95,9 @@ ENTRY(_copy_from_user) jc bad_from_user cmpq TI_addr_limit(%rax),%rcx jae bad_from_user - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string + ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ + copy_user_generic_unrolled,copy_user_generic_string, \ + copy_user_enhanced_fast_string CFI_ENDPROC ENDPROC(_copy_from_user) @@ -255,3 +266,37 @@ ENTRY(copy_user_generic_string) .previous CFI_ENDPROC ENDPROC(copy_user_generic_string) + +/* + * Some CPUs are adding enhanced REP MOVSB/STOSB instructions. + * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled. + * + * Input: + * rdi destination + * rsi source + * rdx count + * + * Output: + * eax uncopied bytes or 0 if successful. + */ +ENTRY(copy_user_enhanced_fast_string) + CFI_STARTPROC + andl %edx,%edx + jz 2f + movl %edx,%ecx +1: rep + movsb +2: xorl %eax,%eax + ret + + .section .fixup,"ax" +12: movl %ecx,%edx /* ecx is zerorest also */ + jmp copy_user_handle_tail + .previous + + .section __ex_table,"a" + .align 8 + .quad 1b,12b + .previous + CFI_ENDPROC +ENDPROC(copy_user_enhanced_fast_string) -- cgit v1.2.3 From 101068c1f4a947ffa08f2782c78e40097300754d Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 17 May 2011 15:29:16 -0700 Subject: x86, mem: memcpy_64.S: Optimize memcpy by enhanced REP MOVSB/STOSB Support memcpy() with enhanced rep movsb. On processors supporting enhanced rep movsb, the alternative memcpy() function using enhanced rep movsb overrides the original function and the fast string function. Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1305671358-14478-8-git-send-email-fenghua.yu@intel.com Signed-off-by: H. Peter Anvin --- arch/x86/lib/memcpy_64.S | 45 ++++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 75ef61e35e3..daab21dae2d 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -4,6 +4,7 @@ #include #include +#include /* * memcpy - Copy a memory block. @@ -37,6 +38,23 @@ .Lmemcpy_e: .previous +/* + * memcpy_c_e() - enhanced fast string memcpy. This is faster and simpler than + * memcpy_c. Use memcpy_c_e when possible. 
+ * + * This gets patched over the unrolled variant (below) via the + * alternative instructions framework: + */ + .section .altinstr_replacement, "ax", @progbits +.Lmemcpy_c_e: + movq %rdi, %rax + + movl %edx, %ecx + rep movsb + ret +.Lmemcpy_e_e: + .previous + ENTRY(__memcpy) ENTRY(memcpy) CFI_STARTPROC @@ -171,21 +189,22 @@ ENDPROC(memcpy) ENDPROC(__memcpy) /* - * Some CPUs run faster using the string copy instructions. - * It is also a lot simpler. Use this when possible: - */ - - .section .altinstructions, "a" - .align 8 - .quad memcpy - .quad .Lmemcpy_c - .word X86_FEATURE_REP_GOOD - - /* + * Some CPUs are adding enhanced REP MOVSB/STOSB feature + * If the feature is supported, memcpy_c_e() is the first choice. + * If enhanced rep movsb copy is not available, use fast string copy + * memcpy_c() when possible. This is faster and code is simpler than + * original memcpy(). + * Otherwise, original memcpy() is used. + * In .altinstructions section, ERMS feature is placed after REG_GOOD + * feature to implement the right patch order. + * * Replace only beginning, memcpy is used to apply alternatives, * so it is silly to overwrite itself with nops - reboot is the * only outcome... */ - .byte .Lmemcpy_e - .Lmemcpy_c - .byte .Lmemcpy_e - .Lmemcpy_c + .section .altinstructions, "a" + altinstruction_entry memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\ + .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c + altinstruction_entry memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \ + .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e .previous -- cgit v1.2.3 From 057e05c1d6440117875f455e59da8691e08f65d5 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 17 May 2011 15:29:17 -0700 Subject: x86, mem: memmove_64.S: Optimize memmove by enhanced REP MOVSB/STOSB Support memmove() by enhanced rep movsb. On processors supporting enhanced REP MOVSB/STOSB, the alternative memmove() function using enhanced rep movsb overrides the original function. The patch doesn't change the backward memmove case to use enhanced rep movsb. Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1305671358-14478-9-git-send-email-fenghua.yu@intel.com Signed-off-by: H. Peter Anvin --- arch/x86/lib/memmove_64.S | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index 0ecb8433e5a..d0ec9c2936d 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S @@ -8,6 +8,7 @@ #define _STRING_C #include #include +#include #undef memmove @@ -24,6 +25,7 @@ */ ENTRY(memmove) CFI_STARTPROC + /* Handle more 32bytes in loop */ mov %rdi, %rax cmp $0x20, %rdx @@ -31,8 +33,13 @@ ENTRY(memmove) /* Decide forward/backward copy mode */ cmp %rdi, %rsi - jb 2f + jge .Lmemmove_begin_forward + mov %rsi, %r8 + add %rdx, %r8 + cmp %rdi, %r8 + jg 2f +.Lmemmove_begin_forward: /* * movsq instruction have many startup latency * so we handle small size by general register. @@ -78,6 +85,8 @@ ENTRY(memmove) rep movsq movq %r11, (%r10) jmp 13f +.Lmemmove_end_forward: + /* * Handle data backward by movsq. */ @@ -194,4 +203,22 @@ ENTRY(memmove) 13: retq CFI_ENDPROC + + .section .altinstr_replacement,"ax" +.Lmemmove_begin_forward_efs: + /* Forward moving data. 
*/ + movq %rdx, %rcx + rep movsb + retq +.Lmemmove_end_forward_efs: + .previous + + .section .altinstructions,"a" + .align 8 + .quad .Lmemmove_begin_forward + .quad .Lmemmove_begin_forward_efs + .word X86_FEATURE_ERMS + .byte .Lmemmove_end_forward-.Lmemmove_begin_forward + .byte .Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs + .previous ENDPROC(memmove) -- cgit v1.2.3 From 2f19e06ac30771c7cb96fd61d8aeacfa74dac21c Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 17 May 2011 15:29:18 -0700 Subject: x86, mem: memset_64.S: Optimize memset by enhanced REP MOVSB/STOSB Support memset() with enhanced rep stosb. On processors supporting enhanced REP MOVSB/STOSB, the alternative memset_c_e function using enhanced rep stosb overrides the fast string alternative memset_c and the original function. Signed-off-by: Fenghua Yu Link: http://lkml.kernel.org/r/1305671358-14478-10-git-send-email-fenghua.yu@intel.com Signed-off-by: H. Peter Anvin --- arch/x86/lib/memset_64.S | 54 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 09d34426965..79bd454b78a 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -2,9 +2,13 @@ #include #include +#include +#include /* - * ISO C memset - set a memory block to a byte value. + * ISO C memset - set a memory block to a byte value. This function uses fast + * string to get better performance than the original function. The code is + * simpler and shorter than the orignal function as well. * * rdi destination * rsi value (char) @@ -31,6 +35,28 @@ .Lmemset_e: .previous +/* + * ISO C memset - set a memory block to a byte value. This function uses + * enhanced rep stosb to override the fast string function. + * The code is simpler and shorter than the fast string function as well. + * + * rdi destination + * rsi value (char) + * rdx count (bytes) + * + * rax original destination + */ + .section .altinstr_replacement, "ax", @progbits +.Lmemset_c_e: + movq %rdi,%r9 + movb %sil,%al + movl %edx,%ecx + rep stosb + movq %r9,%rax + ret +.Lmemset_e_e: + .previous + ENTRY(memset) ENTRY(__memset) CFI_STARTPROC @@ -112,16 +138,20 @@ ENTRY(__memset) ENDPROC(memset) ENDPROC(__memset) - /* Some CPUs run faster using the string instructions. - It is also a lot simpler. Use this when possible */ - -#include - + /* Some CPUs support enhanced REP MOVSB/STOSB feature. + * It is recommended to use this when possible. + * + * If enhanced REP MOVSB/STOSB feature is not available, use fast string + * instructions. + * + * Otherwise, use original memset function. + * + * In .altinstructions section, ERMS feature is placed after REG_GOOD + * feature to implement the right patch order. + */ .section .altinstructions,"a" - .align 8 - .quad memset - .quad .Lmemset_c - .word X86_FEATURE_REP_GOOD - .byte .Lfinal - memset - .byte .Lmemset_e - .Lmemset_c + altinstruction_entry memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\ + .Lfinal-memset,.Lmemset_e-.Lmemset_c + altinstruction_entry memset,.Lmemset_c_e,X86_FEATURE_ERMS, \ + .Lfinal-memset,.Lmemset_e_e-.Lmemset_c_e .previous -- cgit v1.2.3 From 26afb7c661080ae3f1f13ddf7f0c58c4f931c22b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Thu, 12 May 2011 16:30:30 +0200 Subject: x86, 64-bit: Fix copy_[to/from]_user() checks for the userspace address limit As reported in BZ #30352: https://bugzilla.kernel.org/show_bug.cgi?id=30352 there's a kernel bug related to reading the last allowed page on x86_64. 
The _copy_to_user() and _copy_from_user() functions use the following check for address limit: if (buf + size >= limit) fail(); while it should be more permissive: if (buf + size > limit) fail(); That's because size represents the number of bytes being read from or written to the buf address, counting the byte at buf itself. So the copy function will actually never touch the limit address even if "buf + size == limit". The following program fails to use the last page as a buffer due to the wrong limit check: #include <assert.h> #include <stdio.h> #include <sys/mman.h> #include <sys/socket.h> #define PAGE_SIZE (4096) #define LAST_PAGE ((void*)(0x7fffffffe000)) int main() { int fds[2], err; void * ptr = mmap(LAST_PAGE, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); assert(ptr == LAST_PAGE); err = socketpair(AF_LOCAL, SOCK_STREAM, 0, fds); assert(err == 0); err = send(fds[0], ptr, PAGE_SIZE, 0); perror("send"); assert(err == PAGE_SIZE); err = recv(fds[1], ptr, PAGE_SIZE, MSG_WAITALL); perror("recv"); assert(err == PAGE_SIZE); return 0; } The other place checking the addr limit is the access_ok() function, which is working properly. There's just a misleading comment for the __range_not_ok() macro - which this patch fixes as well. The last page of the user-space address range is a guard page, and Brian Gerst observed that the guard page itself is there due to an erratum on K8 cpus (#121 Sequential Execution Across Non-Canonical Boundary Causes Processor Hang). However, the test code is using the last valid page before the guard page. The bug is that the last byte before the guard page can't be read because of the off-by-one error. The guard page is left in place. This bug would normally not show up because the last page is part of the process stack and never accessed via syscalls. Signed-off-by: Jiri Olsa Acked-by: Brian Gerst Acked-by: Linus Torvalds Cc: Link: http://lkml.kernel.org/r/1305210630-7136-1-git-send-email-jolsa@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uaccess.h | 2 +- arch/x86/lib/copy_user_64.S | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index abd3e0ea762..99f0ad753f3 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -42,7 +42,7 @@ * Returns 0 if the range is valid, nonzero otherwise. * * This is equivalent to the following test: - * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64) + * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64) * * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
*/ diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index d17a1170bbf..024840266ba 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -79,7 +79,7 @@ ENTRY(_copy_to_user) addq %rdx,%rcx jc bad_to_user cmpq TI_addr_limit(%rax),%rcx - jae bad_to_user + ja bad_to_user ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ copy_user_generic_unrolled,copy_user_generic_string, \ copy_user_enhanced_fast_string @@ -94,7 +94,7 @@ ENTRY(_copy_from_user) addq %rdx,%rcx jc bad_from_user cmpq TI_addr_limit(%rax),%rcx - jae bad_from_user + ja bad_from_user ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \ copy_user_generic_unrolled,copy_user_generic_string, \ copy_user_enhanced_fast_string -- cgit v1.2.3 From b313207286a78abac19f1dd2721292eae598b0f5 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 18 May 2011 21:00:44 +0200 Subject: perf bench, x86: Add alternatives-asm.h wrapper perf bench needs this to build the kernel's memcpy routine: In file included from bench/mem-memcpy-x86-64-asm.S:2:0: bench/../../../arch/x86/lib/memcpy_64.S:7:33: fatal error: asm/alternative-asm.h: No such file or directory Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Steven Rostedt Link: http://lkml.kernel.org/n/tip-c5d41xibgullk8h2280q4gv0@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/util/include/asm/alternative-asm.h | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 tools/perf/util/include/asm/alternative-asm.h diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h new file mode 100644 index 00000000000..6789d788d49 --- /dev/null +++ b/tools/perf/util/include/asm/alternative-asm.h @@ -0,0 +1,8 @@ +#ifndef _PERF_ASM_ALTERNATIVE_ASM_H +#define _PERF_ASM_ALTERNATIVE_ASM_H + +/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */ + +#define altinstruction_entry # + +#endif -- cgit v1.2.3 From b448c4e3ae6d20108dba1d7833f2c0d3dbad87ce Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 29 Apr 2011 15:12:32 -0400 Subject: ftrace: Replace FTRACE_FL_NOTRACE flag with a hash of ignored functions To prepare for the accounting system that will allow multiple users of the function tracer, having FTRACE_FL_NOTRACE as a flag in the dyn_ftrace record does not make sense. All ftrace_ops will soon have a hash of functions they should trace and not trace. Making a global hash of functions not to trace makes this transition easier.
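As a rough illustration of the data structure (a stand-alone sketch, not the kernel code in the diff below): a fixed-size, chained hash keyed on the function's address takes over the role of the per-record FTRACE_FL_NOTRACE bit, so "do not trace" becomes membership in the hash.

#include <stdlib.h>

#define NOTRACE_HASH_BITS	10
#define NOTRACE_HASH_SIZE	(1u << NOTRACE_HASH_BITS)

struct func_entry {
	unsigned long ip;		/* address of the function's mcount site */
	struct func_entry *next;
};

static struct func_entry *notrace_buckets[NOTRACE_HASH_SIZE];

/* Stand-in for the kernel's hash_long(); any reasonable mixing works here. */
static unsigned long hash_ip(unsigned long ip)
{
	return (ip >> 4) & (NOTRACE_HASH_SIZE - 1);
}

static int ip_is_notraced(unsigned long ip)
{
	struct func_entry *e;

	for (e = notrace_buckets[hash_ip(ip)]; e; e = e->next)
		if (e->ip == ip)
			return 1;
	return 0;
}

static int add_notrace_ip(unsigned long ip)
{
	struct func_entry *e;

	if (ip_is_notraced(ip))
		return 0;		/* already present */
	e = malloc(sizeof(*e));
	if (!e)
		return -1;
	e->ip = ip;
	e->next = notrace_buckets[hash_ip(ip)];
	notrace_buckets[hash_ip(ip)] = e;
	return 0;
}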
Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 1 - kernel/trace/ftrace.c | 176 +++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 150 insertions(+), 27 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 32047449b30..fe0a90a0924 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -149,7 +149,6 @@ enum { FTRACE_FL_FREE = (1 << 0), FTRACE_FL_FILTER = (1 << 1), FTRACE_FL_ENABLED = (1 << 2), - FTRACE_FL_NOTRACE = (1 << 3), }; struct dyn_ftrace { diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index d3406346ced..04c002a491f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -57,6 +57,7 @@ /* hash bits for specific function selection */ #define FTRACE_HASH_BITS 7 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) +#define FTRACE_HASH_MAX_BITS 10 /* ftrace_enabled is a method to turn ftrace on or off */ int ftrace_enabled __read_mostly; @@ -865,6 +866,22 @@ enum { FTRACE_START_FUNC_RET = (1 << 3), FTRACE_STOP_FUNC_RET = (1 << 4), }; +struct ftrace_func_entry { + struct hlist_node hlist; + unsigned long ip; +}; + +struct ftrace_hash { + unsigned long size_bits; + struct hlist_head *buckets; + unsigned long count; +}; + +static struct hlist_head notrace_buckets[1 << FTRACE_HASH_MAX_BITS]; +static struct ftrace_hash notrace_hash = { + .size_bits = FTRACE_HASH_MAX_BITS, + .buckets = notrace_buckets, +}; static int ftrace_filtered; @@ -889,6 +906,79 @@ static struct ftrace_page *ftrace_pages; static struct dyn_ftrace *ftrace_free_records; +static struct ftrace_func_entry * +ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) +{ + unsigned long key; + struct ftrace_func_entry *entry; + struct hlist_head *hhd; + struct hlist_node *n; + + if (!hash->count) + return NULL; + + if (hash->size_bits > 0) + key = hash_long(ip, hash->size_bits); + else + key = 0; + + hhd = &hash->buckets[key]; + + hlist_for_each_entry_rcu(entry, n, hhd, hlist) { + if (entry->ip == ip) + return entry; + } + return NULL; +} + +static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) +{ + struct ftrace_func_entry *entry; + struct hlist_head *hhd; + unsigned long key; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + if (hash->size_bits) + key = hash_long(ip, hash->size_bits); + else + key = 0; + + entry->ip = ip; + hhd = &hash->buckets[key]; + hlist_add_head(&entry->hlist, hhd); + hash->count++; + + return 0; +} + +static void +remove_hash_entry(struct ftrace_hash *hash, + struct ftrace_func_entry *entry) +{ + hlist_del(&entry->hlist); + kfree(entry); + hash->count--; +} + +static void ftrace_hash_clear(struct ftrace_hash *hash) +{ + struct hlist_head *hhd; + struct hlist_node *tp, *tn; + struct ftrace_func_entry *entry; + int size = 1 << hash->size_bits; + int i; + + for (i = 0; i < size; i++) { + hhd = &hash->buckets[i]; + hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) + remove_hash_entry(hash, entry); + } + FTRACE_WARN_ON(hash->count); +} + /* * This is a double for. Do not use 'break' to break out of the loop, * you must use a goto. 
@@ -1032,7 +1122,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) * If we want to enable it and filtering is on, enable it only if * it's filtered */ - if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) { + if (enable && !ftrace_lookup_ip(¬race_hash, rec->ip)) { if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER)) flag = FTRACE_FL_ENABLED; } @@ -1465,7 +1555,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) !(rec->flags & FTRACE_FL_FILTER)) || ((iter->flags & FTRACE_ITER_NOTRACE) && - !(rec->flags & FTRACE_FL_NOTRACE))) { + !ftrace_lookup_ip(¬race_hash, rec->ip))) { rec = NULL; goto retry; } @@ -1609,14 +1699,15 @@ static void ftrace_filter_reset(int enable) { struct ftrace_page *pg; struct dyn_ftrace *rec; - unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; mutex_lock(&ftrace_lock); - if (enable) + if (enable) { ftrace_filtered = 0; - do_for_each_ftrace_rec(pg, rec) { - rec->flags &= ~type; - } while_for_each_ftrace_rec(); + do_for_each_ftrace_rec(pg, rec) { + rec->flags &= ~FTRACE_FL_FILTER; + } while_for_each_ftrace_rec(); + } else + ftrace_hash_clear(¬race_hash); mutex_unlock(&ftrace_lock); } @@ -1716,13 +1807,36 @@ static int ftrace_match(char *str, char *regex, int len, int type) return matched; } -static void -update_record(struct dyn_ftrace *rec, unsigned long flag, int not) +static int +update_record(struct dyn_ftrace *rec, int enable, int not) { - if (not) - rec->flags &= ~flag; - else - rec->flags |= flag; + struct ftrace_func_entry *entry; + struct ftrace_hash *hash = ¬race_hash; + int ret = 0; + + if (enable) { + if (not) + rec->flags &= ~FTRACE_FL_FILTER; + else + rec->flags |= FTRACE_FL_FILTER; + } else { + if (not) { + /* Do nothing if it doesn't exist */ + entry = ftrace_lookup_ip(hash, rec->ip); + if (!entry) + return 0; + + remove_hash_entry(hash, entry); + } else { + /* Do nothing if it exists */ + entry = ftrace_lookup_ip(hash, rec->ip); + if (entry) + return 0; + + ret = add_hash_entry(hash, rec->ip); + } + } + return ret; } static int @@ -1754,16 +1868,14 @@ static int match_records(char *buff, int len, char *mod, int enable, int not) struct dyn_ftrace *rec; int type = MATCH_FULL; char *search = buff; - unsigned long flag; int found = 0; + int ret; if (len) { type = filter_parse_regex(buff, len, &search, ¬); search_len = strlen(search); } - flag = enable ? 
FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; - mutex_lock(&ftrace_lock); if (unlikely(ftrace_disabled)) @@ -1772,7 +1884,11 @@ static int match_records(char *buff, int len, char *mod, int enable, int not) do_for_each_ftrace_rec(pg, rec) { if (ftrace_match_record(rec, mod, search, search_len, type)) { - update_record(rec, flag, not); + ret = update_record(rec, enable, not); + if (ret < 0) { + found = ret; + goto out_unlock; + } found = 1; } /* @@ -1821,6 +1937,7 @@ static int ftrace_mod_callback(char *func, char *cmd, char *param, int enable) { char *mod; + int ret = -EINVAL; /* * cmd == 'mod' because we only registered this func @@ -1832,15 +1949,19 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable) /* we must have a module name */ if (!param) - return -EINVAL; + return ret; mod = strsep(¶m, ":"); if (!strlen(mod)) - return -EINVAL; + return ret; - if (ftrace_match_module_records(func, mod, enable)) - return 0; - return -EINVAL; + ret = ftrace_match_module_records(func, mod, enable); + if (!ret) + ret = -EINVAL; + if (ret < 0) + return ret; + + return 0; } static struct ftrace_func_command ftrace_mod_cmd = { @@ -2132,14 +2253,17 @@ static int ftrace_process_regex(char *buff, int len, int enable) { char *func, *command, *next = buff; struct ftrace_func_command *p; - int ret = -EINVAL; + int ret; func = strsep(&next, ":"); if (!next) { - if (ftrace_match_records(func, len, enable)) - return 0; - return ret; + ret = ftrace_match_records(func, len, enable); + if (!ret) + ret = -EINVAL; + if (ret < 0) + return ret; + return 0; } /* command found */ -- cgit v1.2.3 From 1cf41dd79993389b012e4542ab502ce36ae7343f Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 29 Apr 2011 20:59:51 -0400 Subject: ftrace: Use hash instead for FTRACE_FL_FILTER When multiple users are allowed to have their own set of functions to trace, having the FTRACE_FL_FILTER flag will not be enough to handle the accounting of those users. Each user will need their own set of functions. Replace the FTRACE_FL_FILTER with a filter_hash instead. This is temporary until the rest of the function filtering accounting gets in. 
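A condensed restatement (not a quote from the patch) of the decision __ftrace_replace_code() ends up making once both hashes exist, using the ftrace_hash helpers introduced in the previous patch; should_enable() is a made-up name:

/* A call site is enabled only if it is not in the notrace hash and it either
 * matches the filter hash or the filter hash is empty. */
static int should_enable(struct ftrace_hash *filter_hash,
			 struct ftrace_hash *notrace_hash,
			 unsigned long ip)
{
	if (ftrace_lookup_ip(notrace_hash, ip))
		return 0;			/* explicitly ignored */
	if (!filter_hash->count)
		return 1;			/* empty filter traces everything */
	return ftrace_lookup_ip(filter_hash, ip) != NULL;
}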
Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 3 +- kernel/trace/ftrace.c | 151 ++++++++++++++++++++++--------------------------- 2 files changed, 70 insertions(+), 84 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index fe0a90a0924..52fc5d4e6ed 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -147,8 +147,7 @@ extern int ftrace_text_reserved(void *start, void *end); enum { FTRACE_FL_FREE = (1 << 0), - FTRACE_FL_FILTER = (1 << 1), - FTRACE_FL_ENABLED = (1 << 2), + FTRACE_FL_ENABLED = (1 << 1), }; struct dyn_ftrace { diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 04c002a491f..222eca4c302 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -883,7 +883,11 @@ static struct ftrace_hash notrace_hash = { .buckets = notrace_buckets, }; -static int ftrace_filtered; +static struct hlist_head filter_buckets[1 << FTRACE_HASH_MAX_BITS]; +static struct ftrace_hash filter_hash = { + .size_bits = FTRACE_HASH_MAX_BITS, + .buckets = filter_buckets, +}; static struct dyn_ftrace *ftrace_new_addrs; @@ -1123,7 +1127,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) * it's filtered */ if (enable && !ftrace_lookup_ip(¬race_hash, rec->ip)) { - if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER)) + if (!filter_hash.count || ftrace_lookup_ip(&filter_hash, rec->ip)) flag = FTRACE_FL_ENABLED; } @@ -1430,6 +1434,7 @@ struct ftrace_iterator { struct dyn_ftrace *func; struct ftrace_func_probe *probe; struct trace_parser parser; + struct ftrace_hash *hash; int hidx; int idx; unsigned flags; @@ -1552,7 +1557,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) if ((rec->flags & FTRACE_FL_FREE) || ((iter->flags & FTRACE_ITER_FILTER) && - !(rec->flags & FTRACE_FL_FILTER)) || + !(ftrace_lookup_ip(&filter_hash, rec->ip))) || ((iter->flags & FTRACE_ITER_NOTRACE) && !ftrace_lookup_ip(¬race_hash, rec->ip))) { @@ -1598,7 +1603,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) * off, we can short cut and just print out that all * functions are enabled. */ - if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) { + if (iter->flags & FTRACE_ITER_FILTER && !filter_hash.count) { if (*pos > 0) return t_hash_start(m, pos); iter->flags |= FTRACE_ITER_PRINTALL; @@ -1695,24 +1700,16 @@ ftrace_avail_open(struct inode *inode, struct file *file) return ret; } -static void ftrace_filter_reset(int enable) +static void ftrace_filter_reset(struct ftrace_hash *hash) { - struct ftrace_page *pg; - struct dyn_ftrace *rec; - mutex_lock(&ftrace_lock); - if (enable) { - ftrace_filtered = 0; - do_for_each_ftrace_rec(pg, rec) { - rec->flags &= ~FTRACE_FL_FILTER; - } while_for_each_ftrace_rec(); - } else - ftrace_hash_clear(¬race_hash); + ftrace_hash_clear(hash); mutex_unlock(&ftrace_lock); } static int -ftrace_regex_open(struct inode *inode, struct file *file, int enable) +ftrace_regex_open(struct ftrace_hash *hash, int flag, + struct inode *inode, struct file *file) { struct ftrace_iterator *iter; int ret = 0; @@ -1729,15 +1726,16 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) return -ENOMEM; } + iter->hash = hash; + mutex_lock(&ftrace_regex_lock); if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) - ftrace_filter_reset(enable); + ftrace_filter_reset(hash); if (file->f_mode & FMODE_READ) { iter->pg = ftrace_pages_start; - iter->flags = enable ? 
FTRACE_ITER_FILTER : - FTRACE_ITER_NOTRACE; + iter->flags = flag; ret = seq_open(file, &show_ftrace_seq_ops); if (!ret) { @@ -1757,13 +1755,15 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) static int ftrace_filter_open(struct inode *inode, struct file *file) { - return ftrace_regex_open(inode, file, 1); + return ftrace_regex_open(&filter_hash, FTRACE_ITER_FILTER, + inode, file); } static int ftrace_notrace_open(struct inode *inode, struct file *file) { - return ftrace_regex_open(inode, file, 0); + return ftrace_regex_open(¬race_hash, FTRACE_ITER_NOTRACE, + inode, file); } static loff_t @@ -1808,33 +1808,24 @@ static int ftrace_match(char *str, char *regex, int len, int type) } static int -update_record(struct dyn_ftrace *rec, int enable, int not) +enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) { struct ftrace_func_entry *entry; - struct ftrace_hash *hash = ¬race_hash; int ret = 0; - if (enable) { - if (not) - rec->flags &= ~FTRACE_FL_FILTER; - else - rec->flags |= FTRACE_FL_FILTER; - } else { - if (not) { - /* Do nothing if it doesn't exist */ - entry = ftrace_lookup_ip(hash, rec->ip); - if (!entry) - return 0; + entry = ftrace_lookup_ip(hash, rec->ip); + if (not) { + /* Do nothing if it doesn't exist */ + if (!entry) + return 0; - remove_hash_entry(hash, entry); - } else { - /* Do nothing if it exists */ - entry = ftrace_lookup_ip(hash, rec->ip); - if (entry) - return 0; + remove_hash_entry(hash, entry); + } else { + /* Do nothing if it exists */ + if (entry) + return 0; - ret = add_hash_entry(hash, rec->ip); - } + ret = add_hash_entry(hash, rec->ip); } return ret; } @@ -1861,7 +1852,9 @@ ftrace_match_record(struct dyn_ftrace *rec, char *mod, return ftrace_match(str, regex, len, type); } -static int match_records(char *buff, int len, char *mod, int enable, int not) +static int +match_records(struct ftrace_hash *hash, char *buff, + int len, char *mod, int not) { unsigned search_len = 0; struct ftrace_page *pg; @@ -1884,20 +1877,13 @@ static int match_records(char *buff, int len, char *mod, int enable, int not) do_for_each_ftrace_rec(pg, rec) { if (ftrace_match_record(rec, mod, search, search_len, type)) { - ret = update_record(rec, enable, not); + ret = enter_record(hash, rec, not); if (ret < 0) { found = ret; goto out_unlock; } found = 1; } - /* - * Only enable filtering if we have a function that - * is filtered on. 
- */ - if (enable && (rec->flags & FTRACE_FL_FILTER)) - ftrace_filtered = 1; - } while_for_each_ftrace_rec(); out_unlock: mutex_unlock(&ftrace_lock); @@ -1906,12 +1892,13 @@ static int match_records(char *buff, int len, char *mod, int enable, int not) } static int -ftrace_match_records(char *buff, int len, int enable) +ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) { - return match_records(buff, len, NULL, enable, 0); + return match_records(hash, buff, len, NULL, 0); } -static int ftrace_match_module_records(char *buff, char *mod, int enable) +static int +ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) { int not = 0; @@ -1925,7 +1912,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) not = 1; } - return match_records(buff, strlen(buff), mod, enable, not); + return match_records(hash, buff, strlen(buff), mod, not); } /* @@ -1936,6 +1923,7 @@ static int ftrace_match_module_records(char *buff, char *mod, int enable) static int ftrace_mod_callback(char *func, char *cmd, char *param, int enable) { + struct ftrace_hash *hash; char *mod; int ret = -EINVAL; @@ -1955,7 +1943,12 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable) if (!strlen(mod)) return ret; - ret = ftrace_match_module_records(func, mod, enable); + if (enable) + hash = &filter_hash; + else + hash = ¬race_hash; + + ret = ftrace_match_module_records(hash, func, mod); if (!ret) ret = -EINVAL; if (ret < 0) @@ -2253,12 +2246,18 @@ static int ftrace_process_regex(char *buff, int len, int enable) { char *func, *command, *next = buff; struct ftrace_func_command *p; + struct ftrace_hash *hash; int ret; + if (enable) + hash = &filter_hash; + else + hash = ¬race_hash; + func = strsep(&next, ":"); if (!next) { - ret = ftrace_match_records(func, len, enable); + ret = ftrace_match_records(hash, func, len); if (!ret) ret = -EINVAL; if (ret < 0) @@ -2340,16 +2339,16 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, } static void -ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) +ftrace_set_regex(struct ftrace_hash *hash, unsigned char *buf, int len, int reset) { if (unlikely(ftrace_disabled)) return; mutex_lock(&ftrace_regex_lock); if (reset) - ftrace_filter_reset(enable); + ftrace_filter_reset(hash); if (buf) - ftrace_match_records(buf, len, enable); + ftrace_match_records(hash, buf, len); mutex_unlock(&ftrace_regex_lock); } @@ -2364,7 +2363,7 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) */ void ftrace_set_filter(unsigned char *buf, int len, int reset) { - ftrace_set_regex(buf, len, reset, 1); + ftrace_set_regex(&filter_hash, buf, len, reset); } /** @@ -2379,7 +2378,7 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset) */ void ftrace_set_notrace(unsigned char *buf, int len, int reset) { - ftrace_set_regex(buf, len, reset, 0); + ftrace_set_regex(¬race_hash, buf, len, reset); } /* @@ -2431,22 +2430,22 @@ static void __init set_ftrace_early_graph(char *buf) } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -static void __init set_ftrace_early_filter(char *buf, int enable) +static void __init set_ftrace_early_filter(struct ftrace_hash *hash, char *buf) { char *func; while (buf) { func = strsep(&buf, ","); - ftrace_set_regex(func, strlen(func), 0, enable); + ftrace_set_regex(hash, func, strlen(func), 0); } } static void __init set_ftrace_early_filters(void) { if (ftrace_filter_buf[0]) - set_ftrace_early_filter(ftrace_filter_buf, 1); + set_ftrace_early_filter(&filter_hash, 
ftrace_filter_buf); if (ftrace_notrace_buf[0]) - set_ftrace_early_filter(ftrace_notrace_buf, 0); + set_ftrace_early_filter(¬race_hash, ftrace_notrace_buf); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (ftrace_graph_buf[0]) set_ftrace_early_graph(ftrace_graph_buf); @@ -2454,7 +2453,7 @@ static void __init set_ftrace_early_filters(void) } static int -ftrace_regex_release(struct inode *inode, struct file *file, int enable) +ftrace_regex_release(struct inode *inode, struct file *file) { struct seq_file *m = (struct seq_file *)file->private_data; struct ftrace_iterator *iter; @@ -2471,7 +2470,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) parser = &iter->parser; if (trace_parser_loaded(parser)) { parser->buffer[parser->idx] = 0; - ftrace_match_records(parser->buffer, parser->idx, enable); + ftrace_match_records(iter->hash, parser->buffer, parser->idx); } trace_parser_put(parser); @@ -2488,18 +2487,6 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) return 0; } -static int -ftrace_filter_release(struct inode *inode, struct file *file) -{ - return ftrace_regex_release(inode, file, 1); -} - -static int -ftrace_notrace_release(struct inode *inode, struct file *file) -{ - return ftrace_regex_release(inode, file, 0); -} - static const struct file_operations ftrace_avail_fops = { .open = ftrace_avail_open, .read = seq_read, @@ -2512,7 +2499,7 @@ static const struct file_operations ftrace_filter_fops = { .read = seq_read, .write = ftrace_filter_write, .llseek = ftrace_regex_lseek, - .release = ftrace_filter_release, + .release = ftrace_regex_release, }; static const struct file_operations ftrace_notrace_fops = { @@ -2520,7 +2507,7 @@ static const struct file_operations ftrace_notrace_fops = { .read = seq_read, .write = ftrace_notrace_write, .llseek = ftrace_regex_lseek, - .release = ftrace_notrace_release, + .release = ftrace_regex_release, }; #ifdef CONFIG_FUNCTION_GRAPH_TRACER -- cgit v1.2.3 From f45948e898e7bc76a73a468796d2ce80dd040058 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 2 May 2011 12:29:25 -0400 Subject: ftrace: Create a global_ops to hold the filter and notrace hashes Combine the filter and notrace hashes to be accessed by a single entity, the global_ops. The global_ops is a ftrace_ops structure that is passed to different functions that can read or modify the filtering of the function tracer. The ftrace_ops structure was modified to hold a filter and notrace hashes so that later patches may allow each ftrace_ops to have its own set of rules to what functions may be filtered. 
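For orientation, here is a minimal standalone sketch, not kernel code, of the decision this commit routes through a single ops structure: an ops carries both hashes, and a helper answers "should this ip be traced?" by consulting them. The toy_hash/toy_ops/should_trace names are invented for the example; the real logic lives in __ftrace_replace_code() in the hunks that follow.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the kernel's ftrace_hash: a fixed list of ips. */
struct toy_hash {
	const unsigned long *ips;
	int count;			/* 0 models the "empty hash" case */
};

struct toy_ops {
	struct toy_hash *filter_hash;	/* trace only these, if non-empty */
	struct toy_hash *notrace_hash;	/* never trace these */
};

static bool hash_has(const struct toy_hash *h, unsigned long ip)
{
	for (int i = 0; i < h->count; i++)
		if (h->ips[i] == ip)
			return true;
	return false;
}

/* Models the check __ftrace_replace_code() now performs via global_ops. */
static bool should_trace(const struct toy_ops *ops, unsigned long ip)
{
	if (hash_has(ops->notrace_hash, ip))
		return false;
	return !ops->filter_hash->count || hash_has(ops->filter_hash, ip);
}

int main(void)
{
	const unsigned long filtered[] = { 0x1000 };
	struct toy_hash filter = { filtered, 1 };
	struct toy_hash notrace = { NULL, 0 };
	struct toy_ops ops = { &filter, &notrace };

	printf("0x1000 traced: %d\n", should_trace(&ops, 0x1000));	/* 1 */
	printf("0x2000 traced: %d\n", should_trace(&ops, 0x2000));	/* 0 */
	return 0;
}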
Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 10 ++++++-- kernel/trace/ftrace.c | 65 +++++++++++++++++++++++++++++++++++--------------- 2 files changed, 54 insertions(+), 21 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 52fc5d4e6ed..6658a51390f 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -29,9 +29,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); +struct ftrace_hash; + struct ftrace_ops { - ftrace_func_t func; - struct ftrace_ops *next; + ftrace_func_t func; + struct ftrace_ops *next; +#ifdef CONFIG_DYNAMIC_FTRACE + struct ftrace_hash *notrace_hash; + struct ftrace_hash *filter_hash; +#endif }; extern int function_trace_stop; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 222eca4c302..a517a6c4064 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -889,6 +889,12 @@ static struct ftrace_hash filter_hash = { .buckets = filter_buckets, }; +struct ftrace_ops global_ops = { + .func = ftrace_stub, + .notrace_hash = ¬race_hash, + .filter_hash = &filter_hash, +}; + static struct dyn_ftrace *ftrace_new_addrs; static DEFINE_MUTEX(ftrace_regex_lock); @@ -1112,6 +1118,7 @@ int ftrace_text_reserved(void *start, void *end) static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable) { + struct ftrace_ops *ops = &global_ops; unsigned long ftrace_addr; unsigned long flag = 0UL; @@ -1126,8 +1133,9 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) * If we want to enable it and filtering is on, enable it only if * it's filtered */ - if (enable && !ftrace_lookup_ip(¬race_hash, rec->ip)) { - if (!filter_hash.count || ftrace_lookup_ip(&filter_hash, rec->ip)) + if (enable && !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) { + if (!ops->filter_hash->count || + ftrace_lookup_ip(ops->filter_hash, rec->ip)) flag = FTRACE_FL_ENABLED; } @@ -1531,6 +1539,7 @@ static void * t_next(struct seq_file *m, void *v, loff_t *pos) { struct ftrace_iterator *iter = m->private; + struct ftrace_ops *ops = &global_ops; struct dyn_ftrace *rec = NULL; if (unlikely(ftrace_disabled)) @@ -1557,10 +1566,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos) if ((rec->flags & FTRACE_FL_FREE) || ((iter->flags & FTRACE_ITER_FILTER) && - !(ftrace_lookup_ip(&filter_hash, rec->ip))) || + !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || ((iter->flags & FTRACE_ITER_NOTRACE) && - !ftrace_lookup_ip(¬race_hash, rec->ip))) { + !ftrace_lookup_ip(ops->notrace_hash, rec->ip))) { rec = NULL; goto retry; } @@ -1584,6 +1593,7 @@ static void reset_iter_read(struct ftrace_iterator *iter) static void *t_start(struct seq_file *m, loff_t *pos) { struct ftrace_iterator *iter = m->private; + struct ftrace_ops *ops = &global_ops; void *p = NULL; loff_t l; @@ -1603,7 +1613,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) * off, we can short cut and just print out that all * functions are enabled. 
*/ - if (iter->flags & FTRACE_ITER_FILTER && !filter_hash.count) { + if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) { if (*pos > 0) return t_hash_start(m, pos); iter->flags |= FTRACE_ITER_PRINTALL; @@ -1708,10 +1718,11 @@ static void ftrace_filter_reset(struct ftrace_hash *hash) } static int -ftrace_regex_open(struct ftrace_hash *hash, int flag, +ftrace_regex_open(struct ftrace_ops *ops, int flag, struct inode *inode, struct file *file) { struct ftrace_iterator *iter; + struct ftrace_hash *hash; int ret = 0; if (unlikely(ftrace_disabled)) @@ -1726,6 +1737,11 @@ ftrace_regex_open(struct ftrace_hash *hash, int flag, return -ENOMEM; } + if (flag & FTRACE_ITER_NOTRACE) + hash = ops->notrace_hash; + else + hash = ops->filter_hash; + iter->hash = hash; mutex_lock(&ftrace_regex_lock); @@ -1755,14 +1771,14 @@ ftrace_regex_open(struct ftrace_hash *hash, int flag, static int ftrace_filter_open(struct inode *inode, struct file *file) { - return ftrace_regex_open(&filter_hash, FTRACE_ITER_FILTER, + return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER, inode, file); } static int ftrace_notrace_open(struct inode *inode, struct file *file) { - return ftrace_regex_open(¬race_hash, FTRACE_ITER_NOTRACE, + return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE, inode, file); } @@ -1923,6 +1939,7 @@ ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) static int ftrace_mod_callback(char *func, char *cmd, char *param, int enable) { + struct ftrace_ops *ops = &global_ops; struct ftrace_hash *hash; char *mod; int ret = -EINVAL; @@ -1944,9 +1961,9 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable) return ret; if (enable) - hash = &filter_hash; + hash = ops->filter_hash; else - hash = ¬race_hash; + hash = ops->notrace_hash; ret = ftrace_match_module_records(hash, func, mod); if (!ret) @@ -2245,14 +2262,15 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd) static int ftrace_process_regex(char *buff, int len, int enable) { char *func, *command, *next = buff; + struct ftrace_ops *ops = &global_ops; struct ftrace_func_command *p; struct ftrace_hash *hash; int ret; if (enable) - hash = &filter_hash; + hash = ops->filter_hash; else - hash = ¬race_hash; + hash = ops->notrace_hash; func = strsep(&next, ":"); @@ -2339,11 +2357,19 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, } static void -ftrace_set_regex(struct ftrace_hash *hash, unsigned char *buf, int len, int reset) +ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, + int reset, int enable) { + struct ftrace_hash *hash; + if (unlikely(ftrace_disabled)) return; + if (enable) + hash = ops->filter_hash; + else + hash = ops->notrace_hash; + mutex_lock(&ftrace_regex_lock); if (reset) ftrace_filter_reset(hash); @@ -2363,7 +2389,7 @@ ftrace_set_regex(struct ftrace_hash *hash, unsigned char *buf, int len, int rese */ void ftrace_set_filter(unsigned char *buf, int len, int reset) { - ftrace_set_regex(&filter_hash, buf, len, reset); + ftrace_set_regex(&global_ops, buf, len, reset, 1); } /** @@ -2378,7 +2404,7 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset) */ void ftrace_set_notrace(unsigned char *buf, int len, int reset) { - ftrace_set_regex(¬race_hash, buf, len, reset); + ftrace_set_regex(&global_ops, buf, len, reset, 0); } /* @@ -2430,22 +2456,23 @@ static void __init set_ftrace_early_graph(char *buf) } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -static void __init set_ftrace_early_filter(struct ftrace_hash *hash, char *buf) +static 
void __init +set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable) { char *func; while (buf) { func = strsep(&buf, ","); - ftrace_set_regex(hash, func, strlen(func), 0); + ftrace_set_regex(ops, func, strlen(func), 0, enable); } } static void __init set_ftrace_early_filters(void) { if (ftrace_filter_buf[0]) - set_ftrace_early_filter(&filter_hash, ftrace_filter_buf); + set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1); if (ftrace_notrace_buf[0]) - set_ftrace_early_filter(¬race_hash, ftrace_notrace_buf); + set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (ftrace_graph_buf[0]) set_ftrace_early_graph(ftrace_graph_buf); -- cgit v1.2.3 From 33dc9b1267d59cef46ff0bd6bc043190845dc919 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 2 May 2011 17:34:47 -0400 Subject: ftrace: Separate hash allocation and assignment When filtering, allocate a hash to insert the function records. After the filtering is complete, assign it to the ftrace_ops structure. This allows the ftrace_ops structure to have a much smaller array of hash buckets instead of wasting a lot of memory. A read only empty_hash is created to be the minimum size that any ftrace_ops can point to. When a new hash is created, it has the following steps: o Allocate a default hash. o Walk the function records assigning the filtered records to the hash o Allocate a new hash with the appropriate size buckets o Move the entries from the default hash to the new hash. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 275 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 233 insertions(+), 42 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index a517a6c4064..46f08264980 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -57,7 +57,8 @@ /* hash bits for specific function selection */ #define FTRACE_HASH_BITS 7 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) -#define FTRACE_HASH_MAX_BITS 10 +#define FTRACE_HASH_DEFAULT_BITS 10 +#define FTRACE_HASH_MAX_BITS 12 /* ftrace_enabled is a method to turn ftrace on or off */ int ftrace_enabled __read_mostly; @@ -877,22 +878,22 @@ struct ftrace_hash { unsigned long count; }; -static struct hlist_head notrace_buckets[1 << FTRACE_HASH_MAX_BITS]; -static struct ftrace_hash notrace_hash = { - .size_bits = FTRACE_HASH_MAX_BITS, - .buckets = notrace_buckets, -}; - -static struct hlist_head filter_buckets[1 << FTRACE_HASH_MAX_BITS]; -static struct ftrace_hash filter_hash = { - .size_bits = FTRACE_HASH_MAX_BITS, - .buckets = filter_buckets, +/* + * We make these constant because no one should touch them, + * but they are used as the default "empty hash", to avoid allocating + * it all the time. These are in a read only section such that if + * anyone does try to modify it, it will cause an exception. 
+ */ +static const struct hlist_head empty_buckets[1]; +static const struct ftrace_hash empty_hash = { + .buckets = (struct hlist_head *)empty_buckets, }; +#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) struct ftrace_ops global_ops = { .func = ftrace_stub, - .notrace_hash = ¬race_hash, - .filter_hash = &filter_hash, + .notrace_hash = EMPTY_HASH, + .filter_hash = EMPTY_HASH, }; static struct dyn_ftrace *ftrace_new_addrs; @@ -941,31 +942,38 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) return NULL; } -static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) +static void __add_hash_entry(struct ftrace_hash *hash, + struct ftrace_func_entry *entry) { - struct ftrace_func_entry *entry; struct hlist_head *hhd; unsigned long key; - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - if (hash->size_bits) - key = hash_long(ip, hash->size_bits); + key = hash_long(entry->ip, hash->size_bits); else key = 0; - entry->ip = ip; hhd = &hash->buckets[key]; hlist_add_head(&entry->hlist, hhd); hash->count++; +} + +static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) +{ + struct ftrace_func_entry *entry; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->ip = ip; + __add_hash_entry(hash, entry); return 0; } static void -remove_hash_entry(struct ftrace_hash *hash, +free_hash_entry(struct ftrace_hash *hash, struct ftrace_func_entry *entry) { hlist_del(&entry->hlist); @@ -973,6 +981,14 @@ remove_hash_entry(struct ftrace_hash *hash, hash->count--; } +static void +remove_hash_entry(struct ftrace_hash *hash, + struct ftrace_func_entry *entry) +{ + hlist_del(&entry->hlist); + hash->count--; +} + static void ftrace_hash_clear(struct ftrace_hash *hash) { struct hlist_head *hhd; @@ -981,14 +997,156 @@ static void ftrace_hash_clear(struct ftrace_hash *hash) int size = 1 << hash->size_bits; int i; + if (!hash->count) + return; + for (i = 0; i < size; i++) { hhd = &hash->buckets[i]; hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) - remove_hash_entry(hash, entry); + free_hash_entry(hash, entry); } FTRACE_WARN_ON(hash->count); } +static void free_ftrace_hash(struct ftrace_hash *hash) +{ + if (!hash || hash == EMPTY_HASH) + return; + ftrace_hash_clear(hash); + kfree(hash->buckets); + kfree(hash); +} + +static struct ftrace_hash *alloc_ftrace_hash(int size_bits) +{ + struct ftrace_hash *hash; + int size; + + hash = kzalloc(sizeof(*hash), GFP_KERNEL); + if (!hash) + return NULL; + + size = 1 << size_bits; + hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL); + + if (!hash->buckets) { + kfree(hash); + return NULL; + } + + hash->size_bits = size_bits; + + return hash; +} + +static struct ftrace_hash * +alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) +{ + struct ftrace_func_entry *entry; + struct ftrace_hash *new_hash; + struct hlist_node *tp; + int size; + int ret; + int i; + + new_hash = alloc_ftrace_hash(size_bits); + if (!new_hash) + return NULL; + + /* Empty hash? 
*/ + if (!hash || !hash->count) + return new_hash; + + size = 1 << hash->size_bits; + for (i = 0; i < size; i++) { + hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) { + ret = add_hash_entry(new_hash, entry->ip); + if (ret < 0) + goto free_hash; + } + } + + FTRACE_WARN_ON(new_hash->count != hash->count); + + return new_hash; + + free_hash: + free_ftrace_hash(new_hash); + return NULL; +} + +static int +ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) +{ + struct ftrace_func_entry *entry; + struct hlist_node *tp, *tn; + struct hlist_head *hhd; + struct ftrace_hash *hash = *dst; + unsigned long key; + int size = src->count; + int bits = 0; + int i; + + /* + * If the new source is empty, just free dst and assign it + * the empty_hash. + */ + if (!src->count) { + free_ftrace_hash(*dst); + *dst = EMPTY_HASH; + return 0; + } + + ftrace_hash_clear(hash); + + /* + * Make the hash size about 1/2 the # found + */ + for (size /= 2; size; size >>= 1) + bits++; + + /* Don't allocate too much */ + if (bits > FTRACE_HASH_MAX_BITS) + bits = FTRACE_HASH_MAX_BITS; + + /* We can't modify the empty_hash */ + if (hash == EMPTY_HASH) { + /* Create a new hash */ + *dst = alloc_ftrace_hash(bits); + if (!*dst) { + *dst = EMPTY_HASH; + return -ENOMEM; + } + hash = *dst; + } else { + size = 1 << bits; + + /* Use the old hash, but create new buckets */ + hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL); + if (!hhd) + return -ENOMEM; + + kfree(hash->buckets); + hash->buckets = hhd; + hash->size_bits = bits; + } + + size = 1 << src->size_bits; + for (i = 0; i < size; i++) { + hhd = &src->buckets[i]; + hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) { + if (bits > 0) + key = hash_long(entry->ip, bits); + else + key = 0; + remove_hash_entry(src, entry); + __add_hash_entry(hash, entry); + } + } + + return 0; +} + /* * This is a double for. Do not use 'break' to break out of the loop, * you must use a goto. 
@@ -1443,6 +1601,7 @@ struct ftrace_iterator { struct ftrace_func_probe *probe; struct trace_parser parser; struct ftrace_hash *hash; + struct ftrace_ops *ops; int hidx; int idx; unsigned flags; @@ -1742,22 +1901,37 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, else hash = ops->filter_hash; - iter->hash = hash; + iter->ops = ops; + iter->flags = flag; + + if (file->f_mode & FMODE_WRITE) { + mutex_lock(&ftrace_lock); + iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash); + mutex_unlock(&ftrace_lock); + + if (!iter->hash) { + trace_parser_put(&iter->parser); + kfree(iter); + return -ENOMEM; + } + } mutex_lock(&ftrace_regex_lock); + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) - ftrace_filter_reset(hash); + ftrace_filter_reset(iter->hash); if (file->f_mode & FMODE_READ) { iter->pg = ftrace_pages_start; - iter->flags = flag; ret = seq_open(file, &show_ftrace_seq_ops); if (!ret) { struct seq_file *m = file->private_data; m->private = iter; } else { + /* Failed */ + free_ftrace_hash(iter->hash); trace_parser_put(&iter->parser); kfree(iter); } @@ -1835,7 +2009,7 @@ enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) if (!entry) return 0; - remove_hash_entry(hash, entry); + free_hash_entry(hash, entry); } else { /* Do nothing if it exists */ if (entry) @@ -2259,19 +2433,13 @@ int unregister_ftrace_command(struct ftrace_func_command *cmd) return ret; } -static int ftrace_process_regex(char *buff, int len, int enable) +static int ftrace_process_regex(struct ftrace_hash *hash, + char *buff, int len, int enable) { char *func, *command, *next = buff; - struct ftrace_ops *ops = &global_ops; struct ftrace_func_command *p; - struct ftrace_hash *hash; int ret; - if (enable) - hash = ops->filter_hash; - else - hash = ops->notrace_hash; - func = strsep(&next, ":"); if (!next) { @@ -2328,7 +2496,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, if (read >= 0 && trace_parser_loaded(parser) && !trace_parser_cont(parser)) { - ret = ftrace_process_regex(parser->buffer, + ret = ftrace_process_regex(iter->hash, parser->buffer, parser->idx, enable); trace_parser_clear(parser); if (ret) @@ -2356,26 +2524,40 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, return ftrace_regex_write(file, ubuf, cnt, ppos, 0); } -static void +static int ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, int reset, int enable) { + struct ftrace_hash **orig_hash; struct ftrace_hash *hash; + int ret; if (unlikely(ftrace_disabled)) - return; + return -ENODEV; if (enable) - hash = ops->filter_hash; + orig_hash = &ops->filter_hash; else - hash = ops->notrace_hash; + orig_hash = &ops->notrace_hash; + + hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); + if (!hash) + return -ENOMEM; mutex_lock(&ftrace_regex_lock); if (reset) ftrace_filter_reset(hash); if (buf) ftrace_match_records(hash, buf, len); + + mutex_lock(&ftrace_lock); + ret = ftrace_hash_move(orig_hash, hash); + mutex_unlock(&ftrace_lock); + mutex_unlock(&ftrace_regex_lock); + + free_ftrace_hash(hash); + return ret; } /** @@ -2484,7 +2666,9 @@ ftrace_regex_release(struct inode *inode, struct file *file) { struct seq_file *m = (struct seq_file *)file->private_data; struct ftrace_iterator *iter; + struct ftrace_hash **orig_hash; struct trace_parser *parser; + int ret; mutex_lock(&ftrace_regex_lock); if (file->f_mode & FMODE_READ) { @@ -2501,14 +2685,21 @@ ftrace_regex_release(struct inode *inode, struct file *file) } trace_parser_put(parser); - 
kfree(iter); if (file->f_mode & FMODE_WRITE) { + if (iter->flags & FTRACE_ITER_NOTRACE) + orig_hash = &iter->ops->notrace_hash; + else + orig_hash = &iter->ops->filter_hash; + mutex_lock(&ftrace_lock); - if (ftrace_start_up && ftrace_enabled) + ret = ftrace_hash_move(orig_hash, iter->hash); + if (!ret && ftrace_start_up && ftrace_enabled) ftrace_run_update_code(FTRACE_ENABLE_CALLS); mutex_unlock(&ftrace_lock); } + free_ftrace_hash(iter->hash); + kfree(iter); mutex_unlock(&ftrace_regex_lock); return 0; -- cgit v1.2.3 From ed926f9b35cda0988234c356e16a7cb30f4e5338 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 3 May 2011 13:25:24 -0400 Subject: ftrace: Use counters to enable functions to trace Every function has its own record that stores the instruction pointer and flags for the function to be traced. There are only two flags: enabled and free. The enabled flag states that tracing for the function has been enabled (actively traced), and the free flag states that the record no longer points to a function and can be used by new functions (loaded modules). These flags are now moved to the MSB of the flags (actually just the top 32bits). The rest of the bits (30 bits) are now used as a ref counter. Everytime a tracer register functions to trace, those functions will have its counter incremented. When tracing is enabled, to determine if a function should be traced, the counter is examined, and if it is non-zero it is set to trace. When a ftrace_ops is registered to trace functions, its hashes are examined. If the ftrace_ops filter_hash count is zero, then all functions are set to be traced, otherwise only the functions in the hash are to be traced. The exception to this is if a function is also in the ftrace_ops notrace_hash. Then that function's counter is not incremented for this ftrace_ops. 
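The bit layout described above is easiest to see in isolation. Below is a small standalone model, not the kernel code, mirroring the constants the patch introduces (FTRACE_FL_ENABLED, FTRACE_FL_FREE, FTRACE_FL_MASK, FTRACE_REF_MAX, shortened here); the helper names are invented, and the kernel warns rather than asserts on counter overflow or underflow.

#include <assert.h>
#include <stdio.h>

/* Same layout as the patch: two flag bits on top, a 30-bit ref count below. */
#define FL_ENABLED	(1UL << 30)
#define FL_FREE		(1UL << 31)
#define FL_MASK		(0x3UL << 30)
#define REF_MAX		((1UL << 30) - 1)

static unsigned long ref_count(unsigned long flags)
{
	return flags & ~FL_MASK;
}

/* A registered ftrace_ops starts tracing this record: bump its counter. */
static void rec_ref_inc(unsigned long *flags)
{
	assert(ref_count(*flags) < REF_MAX);
	(*flags)++;
}

/* That ftrace_ops is unregistered again: drop the counter. */
static void rec_ref_dec(unsigned long *flags)
{
	assert(ref_count(*flags) > 0);
	(*flags)--;
}

int main(void)
{
	unsigned long flags = 0;

	rec_ref_inc(&flags);
	if (ref_count(flags))		/* non-zero count => enable tracing */
		flags |= FL_ENABLED;
	printf("refs=%lu enabled=%d\n", ref_count(flags), !!(flags & FL_ENABLED));

	rec_ref_dec(&flags);
	if (!ref_count(flags))		/* no users left => disable */
		flags &= ~FL_ENABLED;
	printf("refs=%lu enabled=%d\n", ref_count(flags), !!(flags & FL_ENABLED));
	return 0;
}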
Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 8 ++- kernel/trace/ftrace.c | 158 ++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 148 insertions(+), 18 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 6658a51390f..ab1c46e70bb 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -37,6 +37,7 @@ struct ftrace_ops { #ifdef CONFIG_DYNAMIC_FTRACE struct ftrace_hash *notrace_hash; struct ftrace_hash *filter_hash; + unsigned long flags; #endif }; @@ -152,10 +153,13 @@ extern void unregister_ftrace_function_probe_all(char *glob); extern int ftrace_text_reserved(void *start, void *end); enum { - FTRACE_FL_FREE = (1 << 0), - FTRACE_FL_ENABLED = (1 << 1), + FTRACE_FL_ENABLED = (1 << 30), + FTRACE_FL_FREE = (1 << 31), }; +#define FTRACE_FL_MASK (0x3UL << 30) +#define FTRACE_REF_MAX ((1 << 30) - 1) + struct dyn_ftrace { union { unsigned long ip; /* address of mcount call-site */ diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 46f08264980..5dd332cc5aa 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -890,6 +890,10 @@ static const struct ftrace_hash empty_hash = { }; #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) +enum { + FTRACE_OPS_FL_ENABLED = 1, +}; + struct ftrace_ops global_ops = { .func = ftrace_stub, .notrace_hash = EMPTY_HASH, @@ -1161,6 +1165,105 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) } \ } +static void __ftrace_hash_rec_update(struct ftrace_ops *ops, + int filter_hash, + bool inc) +{ + struct ftrace_hash *hash; + struct ftrace_hash *other_hash; + struct ftrace_page *pg; + struct dyn_ftrace *rec; + int count = 0; + int all = 0; + + /* Only update if the ops has been registered */ + if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) + return; + + /* + * In the filter_hash case: + * If the count is zero, we update all records. + * Otherwise we just update the items in the hash. + * + * In the notrace_hash case: + * We enable the update in the hash. + * As disabling notrace means enabling the tracing, + * and enabling notrace means disabling, the inc variable + * gets inversed. + */ + if (filter_hash) { + hash = ops->filter_hash; + other_hash = ops->notrace_hash; + if (!hash->count) + all = 1; + } else { + inc = !inc; + hash = ops->notrace_hash; + other_hash = ops->filter_hash; + /* + * If the notrace hash has no items, + * then there's nothing to do. + */ + if (!hash->count) + return; + } + + do_for_each_ftrace_rec(pg, rec) { + int in_other_hash = 0; + int in_hash = 0; + int match = 0; + + if (all) { + /* + * Only the filter_hash affects all records. + * Update if the record is not in the notrace hash. + */ + if (!ftrace_lookup_ip(other_hash, rec->ip)) + match = 1; + } else { + in_hash = !!ftrace_lookup_ip(hash, rec->ip); + in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); + + /* + * + */ + if (filter_hash && in_hash && !in_other_hash) + match = 1; + else if (!filter_hash && in_hash && + (in_other_hash || !other_hash->count)) + match = 1; + } + if (!match) + continue; + + if (inc) { + rec->flags++; + if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) + return; + } else { + if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) + return; + rec->flags--; + } + count++; + /* Shortcut, if we handled all records, we are done. 
*/ + if (!all && count == hash->count) + return; + } while_for_each_ftrace_rec(); +} + +static void ftrace_hash_rec_disable(struct ftrace_ops *ops, + int filter_hash) +{ + __ftrace_hash_rec_update(ops, filter_hash, 0); +} + +static void ftrace_hash_rec_enable(struct ftrace_ops *ops, + int filter_hash) +{ + __ftrace_hash_rec_update(ops, filter_hash, 1); +} + static void ftrace_free_rec(struct dyn_ftrace *rec) { rec->freelist = ftrace_free_records; @@ -1276,26 +1379,24 @@ int ftrace_text_reserved(void *start, void *end) static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable) { - struct ftrace_ops *ops = &global_ops; unsigned long ftrace_addr; unsigned long flag = 0UL; ftrace_addr = (unsigned long)FTRACE_ADDR; /* - * If this record is not to be traced or we want to disable it, - * then disable it. + * If we are enabling tracing: * - * If we want to enable it and filtering is off, then enable it. + * If the record has a ref count, then we need to enable it + * because someone is using it. * - * If we want to enable it and filtering is on, enable it only if - * it's filtered + * Otherwise we make sure its disabled. + * + * If we are disabling tracing, then disable all records that + * are enabled. */ - if (enable && !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) { - if (!ops->filter_hash->count || - ftrace_lookup_ip(ops->filter_hash, rec->ip)) - flag = FTRACE_FL_ENABLED; - } + if (enable && (rec->flags & ~FTRACE_FL_MASK)) + flag = FTRACE_FL_ENABLED; /* If the state of this record hasn't changed, then do nothing */ if ((rec->flags & FTRACE_FL_ENABLED) == flag) @@ -1423,17 +1524,25 @@ static void ftrace_startup_enable(int command) static void ftrace_startup(int command) { + struct ftrace_ops *ops = &global_ops; + if (unlikely(ftrace_disabled)) return; ftrace_start_up++; command |= FTRACE_ENABLE_CALLS; + ops->flags |= FTRACE_OPS_FL_ENABLED; + if (ftrace_start_up == 1) + ftrace_hash_rec_enable(ops, 1); + ftrace_startup_enable(command); } static void ftrace_shutdown(int command) { + struct ftrace_ops *ops = &global_ops; + if (unlikely(ftrace_disabled)) return; @@ -1446,7 +1555,12 @@ static void ftrace_shutdown(int command) WARN_ON_ONCE(ftrace_start_up < 0); if (!ftrace_start_up) + ftrace_hash_rec_disable(ops, 1); + + if (!ftrace_start_up) { command |= FTRACE_DISABLE_CALLS; + ops->flags &= ~FTRACE_OPS_FL_ENABLED; + } if (saved_ftrace_func != ftrace_trace_function) { saved_ftrace_func = ftrace_trace_function; @@ -2668,6 +2782,7 @@ ftrace_regex_release(struct inode *inode, struct file *file) struct ftrace_iterator *iter; struct ftrace_hash **orig_hash; struct trace_parser *parser; + int filter_hash; int ret; mutex_lock(&ftrace_regex_lock); @@ -2687,15 +2802,26 @@ ftrace_regex_release(struct inode *inode, struct file *file) trace_parser_put(parser); if (file->f_mode & FMODE_WRITE) { - if (iter->flags & FTRACE_ITER_NOTRACE) - orig_hash = &iter->ops->notrace_hash; - else + filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); + + if (filter_hash) orig_hash = &iter->ops->filter_hash; + else + orig_hash = &iter->ops->notrace_hash; mutex_lock(&ftrace_lock); + /* + * Remove the current set, update the hash and add + * them back. 
+ */ + ftrace_hash_rec_disable(iter->ops, filter_hash); ret = ftrace_hash_move(orig_hash, iter->hash); - if (!ret && ftrace_start_up && ftrace_enabled) - ftrace_run_update_code(FTRACE_ENABLE_CALLS); + if (!ret) { + ftrace_hash_rec_enable(iter->ops, filter_hash); + if (iter->ops->flags & FTRACE_OPS_FL_ENABLED + && ftrace_enabled) + ftrace_run_update_code(FTRACE_ENABLE_CALLS); + } mutex_unlock(&ftrace_lock); } free_ftrace_hash(iter->hash); -- cgit v1.2.3 From 647bcd03d5b2fb44fd9c9ef1a4f50c2eee8f779a Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 3 May 2011 14:39:21 -0400 Subject: ftrace: Add enabled_functions file Add the enabled_functions file that is used to show all the functions that have been enabled for tracing as well as their ref counts. This helps seeing if any function has been registered and what functions are being traced. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 2 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 5dd332cc5aa..065f1e61e10 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1703,6 +1703,7 @@ enum { FTRACE_ITER_NOTRACE = (1 << 1), FTRACE_ITER_PRINTALL = (1 << 2), FTRACE_ITER_HASH = (1 << 3), + FTRACE_ITER_ENABLED = (1 << 4), }; #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ @@ -1842,7 +1843,11 @@ t_next(struct seq_file *m, void *v, loff_t *pos) !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || ((iter->flags & FTRACE_ITER_NOTRACE) && - !ftrace_lookup_ip(ops->notrace_hash, rec->ip))) { + !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || + + ((iter->flags & FTRACE_ITER_ENABLED) && + !(rec->flags & ~FTRACE_FL_MASK))) { + rec = NULL; goto retry; } @@ -1944,7 +1949,11 @@ static int t_show(struct seq_file *m, void *v) if (!rec) return 0; - seq_printf(m, "%ps\n", (void *)rec->ip); + seq_printf(m, "%ps", (void *)rec->ip); + if (iter->flags & FTRACE_ITER_ENABLED) + seq_printf(m, " (%ld)", + rec->flags & ~FTRACE_FL_MASK); + seq_printf(m, "\n"); return 0; } @@ -1983,6 +1992,34 @@ ftrace_avail_open(struct inode *inode, struct file *file) return ret; } +static int +ftrace_enabled_open(struct inode *inode, struct file *file) +{ + struct ftrace_iterator *iter; + int ret; + + if (unlikely(ftrace_disabled)) + return -ENODEV; + + iter = kzalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return -ENOMEM; + + iter->pg = ftrace_pages_start; + iter->flags = FTRACE_ITER_ENABLED; + + ret = seq_open(file, &show_ftrace_seq_ops); + if (!ret) { + struct seq_file *m = file->private_data; + + m->private = iter; + } else { + kfree(iter); + } + + return ret; +} + static void ftrace_filter_reset(struct ftrace_hash *hash) { mutex_lock(&ftrace_lock); @@ -2838,6 +2875,13 @@ static const struct file_operations ftrace_avail_fops = { .release = seq_release_private, }; +static const struct file_operations ftrace_enabled_fops = { + .open = ftrace_enabled_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + static const struct file_operations ftrace_filter_fops = { .open = ftrace_filter_open, .read = seq_read, @@ -3069,6 +3113,9 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) trace_create_file("available_filter_functions", 0444, d_tracer, NULL, &ftrace_avail_fops); + trace_create_file("enabled_functions", 0444, + d_tracer, NULL, &ftrace_enabled_fops); + trace_create_file("set_ftrace_filter", 0644, d_tracer, NULL, &ftrace_filter_fops); -- cgit v1.2.3 From 
bd69c30b1d08032d97ab0dabd7a1eb7fb73ca2b2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 3 May 2011 21:55:54 -0400 Subject: ftrace: Add ops parameter to ftrace_startup/shutdown functions In order to allow different ops to enable different functions, the ftrace_startup() and ftrace_shutdown() functions need the ops parameter passed to them. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 065f1e61e10..8fef1d99bbb 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1522,10 +1522,8 @@ static void ftrace_startup_enable(int command) ftrace_run_update_code(command); } -static void ftrace_startup(int command) +static void ftrace_startup(struct ftrace_ops *ops, int command) { - struct ftrace_ops *ops = &global_ops; - if (unlikely(ftrace_disabled)) return; @@ -1539,10 +1537,8 @@ static void ftrace_startup(int command) ftrace_startup_enable(command); } -static void ftrace_shutdown(int command) +static void ftrace_shutdown(struct ftrace_ops *ops, int command) { - struct ftrace_ops *ops = &global_ops; - if (unlikely(ftrace_disabled)) return; @@ -2362,7 +2358,7 @@ static void __enable_ftrace_function_probe(void) return; __register_ftrace_function(&trace_probe_ops); - ftrace_startup(0); + ftrace_startup(&global_ops, 0); ftrace_probe_registered = 1; } @@ -2381,7 +2377,7 @@ static void __disable_ftrace_function_probe(void) /* no more funcs left */ __unregister_ftrace_function(&trace_probe_ops); - ftrace_shutdown(0); + ftrace_shutdown(&global_ops, 0); ftrace_probe_registered = 0; } @@ -3267,6 +3263,10 @@ void __init ftrace_init(void) #else +struct ftrace_ops global_ops = { + .func = ftrace_stub, +}; + static int __init ftrace_nodyn_init(void) { ftrace_enabled = 1; @@ -3277,8 +3277,8 @@ device_initcall(ftrace_nodyn_init); static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } static inline void ftrace_startup_enable(int command) { } /* Keep as macros so we do not need to define the commands */ -# define ftrace_startup(command) do { } while (0) -# define ftrace_shutdown(command) do { } while (0) +# define ftrace_startup(ops, command) do { } while (0) +# define ftrace_shutdown(ops, command) do { } while (0) # define ftrace_startup_sysctl() do { } while (0) # define ftrace_shutdown_sysctl() do { } while (0) #endif /* CONFIG_DYNAMIC_FTRACE */ @@ -3583,7 +3583,7 @@ int register_ftrace_function(struct ftrace_ops *ops) goto out_unlock; ret = __register_ftrace_function(ops); - ftrace_startup(0); + ftrace_startup(&global_ops, 0); out_unlock: mutex_unlock(&ftrace_lock); @@ -3602,7 +3602,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops) mutex_lock(&ftrace_lock); ret = __unregister_ftrace_function(ops); - ftrace_shutdown(0); + ftrace_shutdown(&global_ops, 0); mutex_unlock(&ftrace_lock); return ret; @@ -3825,7 +3825,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, ftrace_graph_return = retfunc; ftrace_graph_entry = entryfunc; - ftrace_startup(FTRACE_START_FUNC_RET); + ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); out: mutex_unlock(&ftrace_lock); @@ -3842,7 +3842,7 @@ void unregister_ftrace_graph(void) ftrace_graph_active--; ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; ftrace_graph_entry = ftrace_graph_entry_stub; - ftrace_shutdown(FTRACE_STOP_FUNC_RET); + ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); unregister_pm_notifier(&ftrace_suspend_notifier); 
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); -- cgit v1.2.3 From 2b499381bc50ede01b3d8eab164ca2fad00655f0 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 3 May 2011 22:49:52 -0400 Subject: ftrace: Have global_ops store the functions that are to be traced This is a step towards each ops structure defining its own set of functions to trace. As the current code with pid's and such are specific to the global_ops, it is restructured to be used with the global ops. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 69 +++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 53 insertions(+), 16 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 8fef1d99bbb..dcce0bf9c84 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -91,6 +91,7 @@ static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; +static struct ftrace_ops global_ops; /* * Traverse the ftrace_list, invoking all entries. The reason that we @@ -153,7 +154,7 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) } #endif -static void update_ftrace_function(void) +static void update_global_ops(void) { ftrace_func_t func; @@ -173,6 +174,18 @@ static void update_ftrace_function(void) set_ftrace_pid_function(func); func = ftrace_pid_func; } + + global_ops.func = func; +} + +static void update_ftrace_function(void) +{ + ftrace_func_t func; + + update_global_ops(); + + func = global_ops.func; + #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST ftrace_trace_function = func; #else @@ -181,24 +194,19 @@ static void update_ftrace_function(void) #endif } -static int __register_ftrace_function(struct ftrace_ops *ops) +static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) { - ops->next = ftrace_list; + ops->next = *list; /* * We are entering ops into the ftrace_list but another * CPU might be walking that list. We need to make sure * the ops->next pointer is valid before another CPU sees * the ops pointer included into the ftrace_list. */ - rcu_assign_pointer(ftrace_list, ops); - - if (ftrace_enabled) - update_ftrace_function(); - - return 0; + rcu_assign_pointer(*list, ops); } -static int __unregister_ftrace_function(struct ftrace_ops *ops) +static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) { struct ftrace_ops **p; @@ -206,13 +214,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) * If we are removing the last function, then simply point * to the ftrace_stub. 
*/ - if (ftrace_list == ops && ops->next == &ftrace_list_end) { - ftrace_trace_function = ftrace_stub; - ftrace_list = &ftrace_list_end; + if (*list == ops && ops->next == &ftrace_list_end) { + *list = &ftrace_list_end; return 0; } - for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) + for (p = list; *p != &ftrace_list_end; p = &(*p)->next) if (*p == ops) break; @@ -220,7 +227,37 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) return -1; *p = (*p)->next; + return 0; +} + +static int __register_ftrace_function(struct ftrace_ops *ops) +{ + if (ftrace_disabled) + return -ENODEV; + + if (FTRACE_WARN_ON(ops == &global_ops)) + return -EINVAL; + + add_ftrace_ops(&ftrace_list, ops); + if (ftrace_enabled) + update_ftrace_function(); + + return 0; +} +static int __unregister_ftrace_function(struct ftrace_ops *ops) +{ + int ret; + + if (ftrace_disabled) + return -ENODEV; + + if (FTRACE_WARN_ON(ops == &global_ops)) + return -EINVAL; + + ret = remove_ftrace_ops(&ftrace_list, ops); + if (ret < 0) + return ret; if (ftrace_enabled) update_ftrace_function(); @@ -894,7 +931,7 @@ enum { FTRACE_OPS_FL_ENABLED = 1, }; -struct ftrace_ops global_ops = { +static struct ftrace_ops global_ops = { .func = ftrace_stub, .notrace_hash = EMPTY_HASH, .filter_hash = EMPTY_HASH, @@ -3263,7 +3300,7 @@ void __init ftrace_init(void) #else -struct ftrace_ops global_ops = { +static struct ftrace_ops global_ops = { .func = ftrace_stub, }; -- cgit v1.2.3 From 07fd5515f3b5c20704707f63e7f4485b534508a8 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 5 May 2011 18:03:47 -0400 Subject: ftrace: Free hash with call_rcu_sched() When a hash is modified and might be in use, we need to perform a schedule RCU operation on it, as the hashes will soon be used directly in the function tracer callback. Cc: Paul E. McKenney Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 55 ++++++++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index dcce0bf9c84..92b6fdf49ae 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -913,6 +913,7 @@ struct ftrace_hash { unsigned long size_bits; struct hlist_head *buckets; unsigned long count; + struct rcu_head rcu; }; /* @@ -1058,6 +1059,21 @@ static void free_ftrace_hash(struct ftrace_hash *hash) kfree(hash); } +static void __free_ftrace_hash_rcu(struct rcu_head *rcu) +{ + struct ftrace_hash *hash; + + hash = container_of(rcu, struct ftrace_hash, rcu); + free_ftrace_hash(hash); +} + +static void free_ftrace_hash_rcu(struct ftrace_hash *hash) +{ + if (!hash || hash == EMPTY_HASH) + return; + call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); +} + static struct ftrace_hash *alloc_ftrace_hash(int size_bits) { struct ftrace_hash *hash; @@ -1122,7 +1138,8 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) struct ftrace_func_entry *entry; struct hlist_node *tp, *tn; struct hlist_head *hhd; - struct ftrace_hash *hash = *dst; + struct ftrace_hash *old_hash; + struct ftrace_hash *new_hash; unsigned long key; int size = src->count; int bits = 0; @@ -1133,13 +1150,11 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) * the empty_hash. 
*/ if (!src->count) { - free_ftrace_hash(*dst); - *dst = EMPTY_HASH; + free_ftrace_hash_rcu(*dst); + rcu_assign_pointer(*dst, EMPTY_HASH); return 0; } - ftrace_hash_clear(hash); - /* * Make the hash size about 1/2 the # found */ @@ -1150,27 +1165,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) if (bits > FTRACE_HASH_MAX_BITS) bits = FTRACE_HASH_MAX_BITS; - /* We can't modify the empty_hash */ - if (hash == EMPTY_HASH) { - /* Create a new hash */ - *dst = alloc_ftrace_hash(bits); - if (!*dst) { - *dst = EMPTY_HASH; - return -ENOMEM; - } - hash = *dst; - } else { - size = 1 << bits; - - /* Use the old hash, but create new buckets */ - hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL); - if (!hhd) - return -ENOMEM; - - kfree(hash->buckets); - hash->buckets = hhd; - hash->size_bits = bits; - } + new_hash = alloc_ftrace_hash(bits); + if (!new_hash) + return -ENOMEM; size = 1 << src->size_bits; for (i = 0; i < size; i++) { @@ -1181,10 +1178,14 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) else key = 0; remove_hash_entry(src, entry); - __add_hash_entry(hash, entry); + __add_hash_entry(new_hash, entry); } } + old_hash = *dst; + rcu_assign_pointer(*dst, new_hash); + free_ftrace_hash_rcu(old_hash); + return 0; } -- cgit v1.2.3 From b848914ce39589d89ee0078a6d1ef452b464729e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 4 May 2011 09:27:52 -0400 Subject: ftrace: Implement separate user function filtering ftrace_ops that are registered to trace functions can now be agnostic to each other in respect to what functions they trace. Each ops has their own hash of the functions they want to trace and a hash to what they do not want to trace. A empty hash for the functions they want to trace denotes all functions should be traced that are not in the notrace hash. Cc: Paul E. 
McKenney Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 7 +- kernel/trace/ftrace.c | 193 ++++++++++++++++++++++++++++++-------- kernel/trace/trace_functions.c | 2 + kernel/trace/trace_irqsoff.c | 1 + kernel/trace/trace_sched_wakeup.c | 1 + kernel/trace/trace_stack.c | 1 + 6 files changed, 166 insertions(+), 39 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ab1c46e70bb..4609c0ece79 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -31,13 +31,18 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); struct ftrace_hash; +enum { + FTRACE_OPS_FL_ENABLED = 1 << 0, + FTRACE_OPS_FL_GLOBAL = 1 << 1, +}; + struct ftrace_ops { ftrace_func_t func; struct ftrace_ops *next; + unsigned long flags; #ifdef CONFIG_DYNAMIC_FTRACE struct ftrace_hash *notrace_hash; struct ftrace_hash *filter_hash; - unsigned long flags; #endif }; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 92b6fdf49ae..6c7e1df39b5 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -87,24 +87,29 @@ static struct ftrace_ops ftrace_list_end __read_mostly = .func = ftrace_stub, }; -static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; +static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end; +static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; static struct ftrace_ops global_ops; +static void +ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); + /* - * Traverse the ftrace_list, invoking all entries. The reason that we + * Traverse the ftrace_global_list, invoking all entries. The reason that we * can use rcu_dereference_raw() is that elements removed from this list * are simply leaked, so there is no need to interact with a grace-period * mechanism. The rcu_dereference_raw() calls are needed to handle - * concurrent insertions into the ftrace_list. + * concurrent insertions into the ftrace_global_list. * * Silly Alpha and silly pointer-speculation compiler optimizations! */ -static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) +static void ftrace_global_list_func(unsigned long ip, + unsigned long parent_ip) { - struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/ + struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/ while (op != &ftrace_list_end) { op->func(ip, parent_ip); @@ -163,11 +168,11 @@ static void update_global_ops(void) * function directly. Otherwise, we need to iterate over the * registered callers. 
*/ - if (ftrace_list == &ftrace_list_end || - ftrace_list->next == &ftrace_list_end) - func = ftrace_list->func; + if (ftrace_global_list == &ftrace_list_end || + ftrace_global_list->next == &ftrace_list_end) + func = ftrace_global_list->func; else - func = ftrace_list_func; + func = ftrace_global_list_func; /* If we filter on pids, update to use the pid function */ if (!list_empty(&ftrace_pids)) { @@ -184,7 +189,11 @@ static void update_ftrace_function(void) update_global_ops(); - func = global_ops.func; + if (ftrace_ops_list == &ftrace_list_end || + ftrace_ops_list->next == &ftrace_list_end) + func = ftrace_ops_list->func; + else + func = ftrace_ops_list_func; #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST ftrace_trace_function = func; @@ -198,10 +207,10 @@ static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) { ops->next = *list; /* - * We are entering ops into the ftrace_list but another + * We are entering ops into the list but another * CPU might be walking that list. We need to make sure * the ops->next pointer is valid before another CPU sees - * the ops pointer included into the ftrace_list. + * the ops pointer included into the list. */ rcu_assign_pointer(*list, ops); } @@ -238,7 +247,18 @@ static int __register_ftrace_function(struct ftrace_ops *ops) if (FTRACE_WARN_ON(ops == &global_ops)) return -EINVAL; - add_ftrace_ops(&ftrace_list, ops); + if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) + return -EBUSY; + + if (ops->flags & FTRACE_OPS_FL_GLOBAL) { + int first = ftrace_global_list == &ftrace_list_end; + add_ftrace_ops(&ftrace_global_list, ops); + ops->flags |= FTRACE_OPS_FL_ENABLED; + if (first) + add_ftrace_ops(&ftrace_ops_list, &global_ops); + } else + add_ftrace_ops(&ftrace_ops_list, ops); + if (ftrace_enabled) update_ftrace_function(); @@ -252,12 +272,24 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) if (ftrace_disabled) return -ENODEV; + if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) + return -EBUSY; + if (FTRACE_WARN_ON(ops == &global_ops)) return -EINVAL; - ret = remove_ftrace_ops(&ftrace_list, ops); + if (ops->flags & FTRACE_OPS_FL_GLOBAL) { + ret = remove_ftrace_ops(&ftrace_global_list, ops); + if (!ret && ftrace_global_list == &ftrace_list_end) + ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops); + if (!ret) + ops->flags &= ~FTRACE_OPS_FL_ENABLED; + } else + ret = remove_ftrace_ops(&ftrace_ops_list, ops); + if (ret < 0) return ret; + if (ftrace_enabled) update_ftrace_function(); @@ -928,10 +960,6 @@ static const struct ftrace_hash empty_hash = { }; #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) -enum { - FTRACE_OPS_FL_ENABLED = 1, -}; - static struct ftrace_ops global_ops = { .func = ftrace_stub, .notrace_hash = EMPTY_HASH, @@ -1189,6 +1217,40 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) return 0; } +/* + * Test the hashes for this ops to see if we want to call + * the ops->func or not. + * + * It's a match if the ip is in the ops->filter_hash or + * the filter_hash does not exist or is empty, + * AND + * the ip is not in the ops->notrace_hash. 
+ */ +static int +ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) +{ + struct ftrace_hash *filter_hash; + struct ftrace_hash *notrace_hash; + int ret; + + /* The hashes are freed with call_rcu_sched() */ + preempt_disable_notrace(); + + filter_hash = rcu_dereference_raw(ops->filter_hash); + notrace_hash = rcu_dereference_raw(ops->notrace_hash); + + if ((!filter_hash || !filter_hash->count || + ftrace_lookup_ip(filter_hash, ip)) && + (!notrace_hash || !notrace_hash->count || + !ftrace_lookup_ip(notrace_hash, ip))) + ret = 1; + else + ret = 0; + preempt_enable_notrace(); + + return ret; +} + /* * This is a double for. Do not use 'break' to break out of the loop, * you must use a goto. @@ -1232,7 +1294,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, if (filter_hash) { hash = ops->filter_hash; other_hash = ops->notrace_hash; - if (!hash->count) + if (!hash || !hash->count) all = 1; } else { inc = !inc; @@ -1242,7 +1304,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, * If the notrace hash has no items, * then there's nothing to do. */ - if (!hash->count) + if (hash && !hash->count) return; } @@ -1256,11 +1318,11 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, * Only the filter_hash affects all records. * Update if the record is not in the notrace hash. */ - if (!ftrace_lookup_ip(other_hash, rec->ip)) + if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) match = 1; } else { - in_hash = !!ftrace_lookup_ip(hash, rec->ip); - in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); + in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip); + in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip); /* * @@ -1546,6 +1608,7 @@ static void ftrace_run_update_code(int command) static ftrace_func_t saved_ftrace_func; static int ftrace_start_up; +static int global_start_up; static void ftrace_startup_enable(int command) { @@ -1562,14 +1625,25 @@ static void ftrace_startup_enable(int command) static void ftrace_startup(struct ftrace_ops *ops, int command) { + bool hash_enable = true; + if (unlikely(ftrace_disabled)) return; ftrace_start_up++; command |= FTRACE_ENABLE_CALLS; + /* ops marked global share the filter hashes */ + if (ops->flags & FTRACE_OPS_FL_GLOBAL) { + ops = &global_ops; + /* Don't update hash if global is already set */ + if (global_start_up) + hash_enable = false; + global_start_up++; + } + ops->flags |= FTRACE_OPS_FL_ENABLED; - if (ftrace_start_up == 1) + if (hash_enable) ftrace_hash_rec_enable(ops, 1); ftrace_startup_enable(command); @@ -1577,6 +1651,8 @@ static void ftrace_startup(struct ftrace_ops *ops, int command) static void ftrace_shutdown(struct ftrace_ops *ops, int command) { + bool hash_disable = true; + if (unlikely(ftrace_disabled)) return; @@ -1588,13 +1664,25 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command) */ WARN_ON_ONCE(ftrace_start_up < 0); - if (!ftrace_start_up) + if (ops->flags & FTRACE_OPS_FL_GLOBAL) { + ops = &global_ops; + global_start_up--; + WARN_ON_ONCE(global_start_up < 0); + /* Don't update hash if global still has users */ + if (global_start_up) { + WARN_ON_ONCE(!ftrace_start_up); + hash_disable = false; + } + } + + if (hash_disable) ftrace_hash_rec_disable(ops, 1); - if (!ftrace_start_up) { - command |= FTRACE_DISABLE_CALLS; + if (ops != &global_ops || !global_start_up) ops->flags &= ~FTRACE_OPS_FL_ENABLED; - } + + if (!ftrace_start_up) + command |= FTRACE_DISABLE_CALLS; if (saved_ftrace_func != ftrace_trace_function) { saved_ftrace_func = ftrace_trace_function; 
@@ -2381,6 +2469,7 @@ static int ftrace_probe_registered; static void __enable_ftrace_function_probe(void) { + int ret; int i; if (ftrace_probe_registered) @@ -2395,13 +2484,16 @@ static void __enable_ftrace_function_probe(void) if (i == FTRACE_FUNC_HASHSIZE) return; - __register_ftrace_function(&trace_probe_ops); - ftrace_startup(&global_ops, 0); + ret = __register_ftrace_function(&trace_probe_ops); + if (!ret) + ftrace_startup(&trace_probe_ops, 0); + ftrace_probe_registered = 1; } static void __disable_ftrace_function_probe(void) { + int ret; int i; if (!ftrace_probe_registered) @@ -2414,8 +2506,10 @@ static void __disable_ftrace_function_probe(void) } /* no more funcs left */ - __unregister_ftrace_function(&trace_probe_ops); - ftrace_shutdown(&global_ops, 0); + ret = __unregister_ftrace_function(&trace_probe_ops); + if (!ret) + ftrace_shutdown(&trace_probe_ops, 0); + ftrace_probe_registered = 0; } @@ -3319,8 +3413,28 @@ static inline void ftrace_startup_enable(int command) { } # define ftrace_shutdown(ops, command) do { } while (0) # define ftrace_startup_sysctl() do { } while (0) # define ftrace_shutdown_sysctl() do { } while (0) + +static inline int +ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) +{ + return 1; +} + #endif /* CONFIG_DYNAMIC_FTRACE */ +static void +ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) +{ + /* see comment above ftrace_global_list_func */ + struct ftrace_ops *op = rcu_dereference_raw(ftrace_ops_list); + + while (op != &ftrace_list_end) { + if (ftrace_ops_test(op, ip)) + op->func(ip, parent_ip); + op = rcu_dereference_raw(op->next); + }; +} + static void clear_ftrace_swapper(void) { struct task_struct *p; @@ -3621,7 +3735,9 @@ int register_ftrace_function(struct ftrace_ops *ops) goto out_unlock; ret = __register_ftrace_function(ops); - ftrace_startup(&global_ops, 0); + if (!ret) + ftrace_startup(ops, 0); + out_unlock: mutex_unlock(&ftrace_lock); @@ -3640,7 +3756,8 @@ int unregister_ftrace_function(struct ftrace_ops *ops) mutex_lock(&ftrace_lock); ret = __unregister_ftrace_function(ops); - ftrace_shutdown(&global_ops, 0); + if (!ret) + ftrace_shutdown(ops, 0); mutex_unlock(&ftrace_lock); return ret; @@ -3670,11 +3787,11 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, ftrace_startup_sysctl(); /* we are starting ftrace again */ - if (ftrace_list != &ftrace_list_end) { - if (ftrace_list->next == &ftrace_list_end) - ftrace_trace_function = ftrace_list->func; + if (ftrace_ops_list != &ftrace_list_end) { + if (ftrace_ops_list->next == &ftrace_list_end) + ftrace_trace_function = ftrace_ops_list->func; else - ftrace_trace_function = ftrace_list_func; + ftrace_trace_function = ftrace_ops_list_func; } } else { diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 16aee4d44e8..8d0e1cc4e97 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -149,11 +149,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = function_trace_call, + .flags = FTRACE_OPS_FL_GLOBAL, }; static struct ftrace_ops trace_stack_ops __read_mostly = { .func = function_stack_trace_call, + .flags = FTRACE_OPS_FL_GLOBAL, }; /* Our two options */ diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index a4969b47afc..c77424be284 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -153,6 +153,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops 
trace_ops __read_mostly = { .func = irqsoff_tracer_call, + .flags = FTRACE_OPS_FL_GLOBAL, }; #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 7319559ed59..f029dd4fd2c 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -129,6 +129,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = wakeup_tracer_call, + .flags = FTRACE_OPS_FL_GLOBAL, }; #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 4c5dead0c23..b0b53b8e4c2 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -133,6 +133,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = stack_trace_call, + .flags = FTRACE_OPS_FL_GLOBAL, }; static ssize_t -- cgit v1.2.3 From cdbe61bfe70440939e457fb4a8d0995eaaed17de Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 5 May 2011 21:14:55 -0400 Subject: ftrace: Allow dynamically allocated function tracers Now that functions may be selected individually, it only makes sense that we should allow dynamically allocated trace structures to be traced. This will allow perf to allocate a ftrace_ops structure at runtime and use it to pick and choose which functions that structure will trace. Note, a dynamically allocated ftrace_ops will always be called indirectly instead of being called directly from the mcount in entry.S. This is because there's no safe way to prevent mcount from being preempted before calling the function, unless we modify every entry.S to do so (not likely). Thus, dynamically allocated functions will now be called by the ftrace_ops_list_func() that loops through the ops that are allocated if there are more than one op allocated at a time. This loop is protected with a preempt_disable. To determine if an ftrace_ops structure is allocated or not, a new util function was added to the kernel/extable.c called core_kernel_data(), which returns 1 if the address is between _sdata and _edata. Cc: Paul E. 
McKenney Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 1 + include/linux/kernel.h | 1 + kernel/extable.c | 8 ++++++++ kernel/trace/ftrace.c | 37 ++++++++++++++++++++++++++++++------- 4 files changed, 40 insertions(+), 7 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 4609c0ece79..caba694a62b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -34,6 +34,7 @@ struct ftrace_hash; enum { FTRACE_OPS_FL_ENABLED = 1 << 0, FTRACE_OPS_FL_GLOBAL = 1 << 1, + FTRACE_OPS_FL_DYNAMIC = 1 << 2, }; struct ftrace_ops { diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 00cec4dc0ae..f37ba716ef8 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -283,6 +283,7 @@ extern char *get_options(const char *str, int nints, int *ints); extern unsigned long long memparse(const char *ptr, char **retptr); extern int core_kernel_text(unsigned long addr); +extern int core_kernel_data(unsigned long addr); extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); extern int func_ptr_is_kernel_text(void *ptr); diff --git a/kernel/extable.c b/kernel/extable.c index 7f8f263f852..c2d625fcda7 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -72,6 +72,14 @@ int core_kernel_text(unsigned long addr) return 0; } +int core_kernel_data(unsigned long addr) +{ + if (addr >= (unsigned long)_sdata && + addr < (unsigned long)_edata) + return 1; + return 0; +} + int __kernel_text_address(unsigned long addr) { if (core_kernel_text(addr)) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6c7e1df39b5..5b3ee04e39d 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -189,8 +189,14 @@ static void update_ftrace_function(void) update_global_ops(); + /* + * If we are at the end of the list and this ops is + * not dynamic, then have the mcount trampoline call + * the function directly + */ if (ftrace_ops_list == &ftrace_list_end || - ftrace_ops_list->next == &ftrace_list_end) + (ftrace_ops_list->next == &ftrace_list_end && + !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC))) func = ftrace_ops_list->func; else func = ftrace_ops_list_func; @@ -250,6 +256,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops) if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) return -EBUSY; + if (!core_kernel_data((unsigned long)ops)) + ops->flags |= FTRACE_OPS_FL_DYNAMIC; + if (ops->flags & FTRACE_OPS_FL_GLOBAL) { int first = ftrace_global_list == &ftrace_list_end; add_ftrace_ops(&ftrace_global_list, ops); @@ -293,6 +302,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) if (ftrace_enabled) update_ftrace_function(); + /* + * Dynamic ops may be freed, we must make sure that all + * callers are done before leaving this function. + */ + if (ops->flags & FTRACE_OPS_FL_DYNAMIC) + synchronize_sched(); + return 0; } @@ -1225,6 +1241,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src) * the filter_hash does not exist or is empty, * AND * the ip is not in the ops->notrace_hash. + * + * This needs to be called with preemption disabled as + * the hashes are freed with call_rcu_sched(). 
*/ static int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) @@ -1233,9 +1252,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) struct ftrace_hash *notrace_hash; int ret; - /* The hashes are freed with call_rcu_sched() */ - preempt_disable_notrace(); - filter_hash = rcu_dereference_raw(ops->filter_hash); notrace_hash = rcu_dereference_raw(ops->notrace_hash); @@ -1246,7 +1262,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) ret = 1; else ret = 0; - preempt_enable_notrace(); return ret; } @@ -3425,14 +3440,20 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) { - /* see comment above ftrace_global_list_func */ - struct ftrace_ops *op = rcu_dereference_raw(ftrace_ops_list); + struct ftrace_ops *op; + /* + * Some of the ops may be dynamically allocated, + * they must be freed after a synchronize_sched(). + */ + preempt_disable_notrace(); + op = rcu_dereference_raw(ftrace_ops_list); while (op != &ftrace_list_end) { if (ftrace_ops_test(op, ip)) op->func(ip, parent_ip); op = rcu_dereference_raw(op->next); }; + preempt_enable_notrace(); } static void clear_ftrace_swapper(void) @@ -3743,6 +3764,7 @@ int register_ftrace_function(struct ftrace_ops *ops) mutex_unlock(&ftrace_lock); return ret; } +EXPORT_SYMBOL_GPL(register_ftrace_function); /** * unregister_ftrace_function - unregister a function for profiling. @@ -3762,6 +3784,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops) return ret; } +EXPORT_SYMBOL_GPL(unregister_ftrace_function); int ftrace_enable_sysctl(struct ctl_table *table, int write, -- cgit v1.2.3 From 936e074b286ae779f134312178dbab139ee7ea52 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 5 May 2011 22:54:01 -0400 Subject: ftrace: Modify ftrace_set_filter/notrace to take ops Since users of the function tracer can now pick and choose which functions they want to trace agnostically from other users of the function tracer, we need to pass the ops struct to the ftrace_set_filter() functions. The functions ftrace_set_global_filter() and ftrace_set_global_notrace() is added to keep the old filter functions which are used to modify the generic function tracers. 
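A minimal sketch, not taken from the patch, of what a caller of the new per-ops interface looks like; my_ops, my_callback and the "kmalloc*" pattern are invented names used only for illustration:

	#include <linux/ftrace.h>
	#include <linux/string.h>

	static void my_callback(unsigned long ip, unsigned long parent_ip)
	{
		/* runs only for functions matching my_ops' own filter hash */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_callback,
	};

	static int my_tracer_start(void)
	{
		char *pat = "kmalloc*";	/* hypothetical filter pattern */

		/* this filter applies to my_ops only; global tracers are untouched */
		ftrace_set_filter(&my_ops, pat, strlen(pat), 1);

		return register_ftrace_function(&my_ops);
	}

Callers that really do mean the generic function tracers switch to ftrace_set_global_filter()/ftrace_set_global_notrace(), as the trace_selftest.c hunk below does.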
Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 7 ++++++- kernel/trace/ftrace.c | 46 +++++++++++++++++++++++++++++++++++++++++-- kernel/trace/trace_selftest.c | 4 ++-- 3 files changed, 52 insertions(+), 5 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index caba694a62b..9d88e1cb5db 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -179,7 +179,12 @@ struct dyn_ftrace { }; int ftrace_force_update(void); -void ftrace_set_filter(unsigned char *buf, int len, int reset); +void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset); +void ftrace_set_global_filter(unsigned char *buf, int len, int reset); +void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); int register_ftrace_command(struct ftrace_func_command *cmd); int unregister_ftrace_command(struct ftrace_func_command *cmd); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 5b3ee04e39d..d017c2c82c4 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2826,6 +2826,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, struct ftrace_hash *hash; int ret; + /* All global ops uses the global ops filters */ + if (ops->flags & FTRACE_OPS_FL_GLOBAL) + ops = &global_ops; + if (unlikely(ftrace_disabled)) return -ENODEV; @@ -2856,6 +2860,41 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, /** * ftrace_set_filter - set a function to filter on in ftrace + * @ops - the ops to set the filter with + * @buf - the string that holds the function filter text. + * @len - the length of the string. + * @reset - non zero to reset all filters before applying this filter. + * + * Filters denote which functions should be enabled when tracing is enabled. + * If @buf is NULL and reset is set, all functions will be enabled for tracing. + */ +void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset) +{ + ftrace_set_regex(ops, buf, len, reset, 1); +} +EXPORT_SYMBOL_GPL(ftrace_set_filter); + +/** + * ftrace_set_notrace - set a function to not trace in ftrace + * @ops - the ops to set the notrace filter with + * @buf - the string that holds the function notrace text. + * @len - the length of the string. + * @reset - non zero to reset all filters before applying this filter. + * + * Notrace Filters denote which functions should not be enabled when tracing + * is enabled. If @buf is NULL and reset is set, all functions will be enabled + * for tracing. + */ +void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, + int len, int reset) +{ + ftrace_set_regex(ops, buf, len, reset, 0); +} +EXPORT_SYMBOL_GPL(ftrace_set_notrace); +/** + * ftrace_set_filter - set a function to filter on in ftrace + * @ops - the ops to set the filter with * @buf - the string that holds the function filter text. * @len - the length of the string. * @reset - non zero to reset all filters before applying this filter. @@ -2863,13 +2902,15 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, * Filters denote which functions should be enabled when tracing is enabled. * If @buf is NULL and reset is set, all functions will be enabled for tracing. 
*/ -void ftrace_set_filter(unsigned char *buf, int len, int reset) +void ftrace_set_global_filter(unsigned char *buf, int len, int reset) { ftrace_set_regex(&global_ops, buf, len, reset, 1); } +EXPORT_SYMBOL_GPL(ftrace_set_global_filter); /** * ftrace_set_notrace - set a function to not trace in ftrace + * @ops - the ops to set the notrace filter with * @buf - the string that holds the function notrace text. * @len - the length of the string. * @reset - non zero to reset all filters before applying this filter. @@ -2878,10 +2919,11 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset) * is enabled. If @buf is NULL and reset is set, all functions will be enabled * for tracing. */ -void ftrace_set_notrace(unsigned char *buf, int len, int reset) +void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) { ftrace_set_regex(&global_ops, buf, len, reset, 0); } +EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); /* * command line interface to allow users to set filters on boot up. diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 659732eba07..0fa2db305b7 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -131,7 +131,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); /* filter only on our function */ - ftrace_set_filter(func_name, strlen(func_name), 1); + ftrace_set_global_filter(func_name, strlen(func_name), 1); /* enable tracing */ ret = tracer_init(trace, tr); @@ -181,7 +181,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, tracer_enabled = save_tracer_enabled; /* Enable tracing on all functions again */ - ftrace_set_filter(NULL, 0, 1); + ftrace_set_global_filter(NULL, 0, 1); return ret; } -- cgit v1.2.3 From 95950c2ecb31314ef827428e43ff771cf3b037e5 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 6 May 2011 00:08:51 -0400 Subject: ftrace: Add self-tests for multiple function trace users Add some basic sanity tests for multiple users of the function tracer at startup. 
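Besides the three static ops, the test also exercises the kzalloc()'ed-ops path introduced by the earlier "Allow dynamically allocated function tracers" patch. Stripped down, and with my_func as an invented stand-in for the selftest's trace_selftest_test_dyn_func(), that lifecycle is roughly:

	#include <linux/ftrace.h>
	#include <linux/slab.h>

	static void my_func(unsigned long ip, unsigned long parent_ip)
	{
		/* invented callback for illustration */
	}

	static int run_dynamic_ops(void)
	{
		struct ftrace_ops *ops = kzalloc(sizeof(*ops), GFP_KERNEL);

		if (!ops)
			return -ENOMEM;
		ops->func = my_func;

		/* ops lives outside core kernel data, so FL_DYNAMIC is set for us */
		register_ftrace_function(ops);

		/* ... callbacks arrive via ftrace_ops_list_func() ... */

		/* unregister waits with synchronize_sched() before we may free */
		unregister_ftrace_function(ops);
		kfree(ops);
		return 0;
	}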
Signed-off-by: Steven Rostedt --- kernel/trace/trace.h | 2 + kernel/trace/trace_selftest.c | 210 +++++++++++++++++++++++++++++++++- kernel/trace/trace_selftest_dynamic.c | 6 + 3 files changed, 217 insertions(+), 1 deletion(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 5e9dfc6286d..6b69c4bd306 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -419,6 +419,8 @@ extern void trace_find_cmdline(int pid, char comm[]); extern unsigned long ftrace_update_tot_cnt; #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func extern int DYN_FTRACE_TEST_NAME(void); +#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 +extern int DYN_FTRACE_TEST_NAME2(void); #endif extern int ring_buffer_expanded; diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 0fa2db305b7..288541f977f 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -101,6 +101,206 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) #ifdef CONFIG_DYNAMIC_FTRACE +static int trace_selftest_test_probe1_cnt; +static void trace_selftest_test_probe1_func(unsigned long ip, + unsigned long pip) +{ + trace_selftest_test_probe1_cnt++; +} + +static int trace_selftest_test_probe2_cnt; +static void trace_selftest_test_probe2_func(unsigned long ip, + unsigned long pip) +{ + trace_selftest_test_probe2_cnt++; +} + +static int trace_selftest_test_probe3_cnt; +static void trace_selftest_test_probe3_func(unsigned long ip, + unsigned long pip) +{ + trace_selftest_test_probe3_cnt++; +} + +static int trace_selftest_test_global_cnt; +static void trace_selftest_test_global_func(unsigned long ip, + unsigned long pip) +{ + trace_selftest_test_global_cnt++; +} + +static int trace_selftest_test_dyn_cnt; +static void trace_selftest_test_dyn_func(unsigned long ip, + unsigned long pip) +{ + trace_selftest_test_dyn_cnt++; +} + +static struct ftrace_ops test_probe1 = { + .func = trace_selftest_test_probe1_func, +}; + +static struct ftrace_ops test_probe2 = { + .func = trace_selftest_test_probe2_func, +}; + +static struct ftrace_ops test_probe3 = { + .func = trace_selftest_test_probe3_func, +}; + +static struct ftrace_ops test_global = { + .func = trace_selftest_test_global_func, + .flags = FTRACE_OPS_FL_GLOBAL, +}; + +static void print_counts(void) +{ + printk("(%d %d %d %d %d) ", + trace_selftest_test_probe1_cnt, + trace_selftest_test_probe2_cnt, + trace_selftest_test_probe3_cnt, + trace_selftest_test_global_cnt, + trace_selftest_test_dyn_cnt); +} + +static void reset_counts(void) +{ + trace_selftest_test_probe1_cnt = 0; + trace_selftest_test_probe2_cnt = 0; + trace_selftest_test_probe3_cnt = 0; + trace_selftest_test_global_cnt = 0; + trace_selftest_test_dyn_cnt = 0; +} + +static int trace_selftest_ops(int cnt) +{ + int save_ftrace_enabled = ftrace_enabled; + struct ftrace_ops *dyn_ops; + char *func1_name; + char *func2_name; + int len1; + int len2; + int ret = -1; + + printk(KERN_CONT "PASSED\n"); + pr_info("Testing dynamic ftrace ops #%d: ", cnt); + + ftrace_enabled = 1; + reset_counts(); + + /* Handle PPC64 '.' name */ + func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME); + func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2); + len1 = strlen(func1_name); + len2 = strlen(func2_name); + + /* + * Probe 1 will trace function 1. + * Probe 2 will trace function 2. + * Probe 3 will trace functions 1 and 2. 
+ */ + ftrace_set_filter(&test_probe1, func1_name, len1, 1); + ftrace_set_filter(&test_probe2, func2_name, len2, 1); + ftrace_set_filter(&test_probe3, func1_name, len1, 1); + ftrace_set_filter(&test_probe3, func2_name, len2, 0); + + register_ftrace_function(&test_probe1); + register_ftrace_function(&test_probe2); + register_ftrace_function(&test_probe3); + register_ftrace_function(&test_global); + + DYN_FTRACE_TEST_NAME(); + + print_counts(); + + if (trace_selftest_test_probe1_cnt != 1) + goto out; + if (trace_selftest_test_probe2_cnt != 0) + goto out; + if (trace_selftest_test_probe3_cnt != 1) + goto out; + if (trace_selftest_test_global_cnt == 0) + goto out; + + DYN_FTRACE_TEST_NAME2(); + + print_counts(); + + if (trace_selftest_test_probe1_cnt != 1) + goto out; + if (trace_selftest_test_probe2_cnt != 1) + goto out; + if (trace_selftest_test_probe3_cnt != 2) + goto out; + + /* Add a dynamic probe */ + dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL); + if (!dyn_ops) { + printk("MEMORY ERROR "); + goto out; + } + + dyn_ops->func = trace_selftest_test_dyn_func; + + register_ftrace_function(dyn_ops); + + trace_selftest_test_global_cnt = 0; + + DYN_FTRACE_TEST_NAME(); + + print_counts(); + + if (trace_selftest_test_probe1_cnt != 2) + goto out_free; + if (trace_selftest_test_probe2_cnt != 1) + goto out_free; + if (trace_selftest_test_probe3_cnt != 3) + goto out_free; + if (trace_selftest_test_global_cnt == 0) + goto out; + if (trace_selftest_test_dyn_cnt == 0) + goto out_free; + + DYN_FTRACE_TEST_NAME2(); + + print_counts(); + + if (trace_selftest_test_probe1_cnt != 2) + goto out_free; + if (trace_selftest_test_probe2_cnt != 2) + goto out_free; + if (trace_selftest_test_probe3_cnt != 4) + goto out_free; + + ret = 0; + out_free: + unregister_ftrace_function(dyn_ops); + kfree(dyn_ops); + + out: + /* Purposely unregister in the same order */ + unregister_ftrace_function(&test_probe1); + unregister_ftrace_function(&test_probe2); + unregister_ftrace_function(&test_probe3); + unregister_ftrace_function(&test_global); + + /* Make sure everything is off */ + reset_counts(); + DYN_FTRACE_TEST_NAME(); + DYN_FTRACE_TEST_NAME(); + + if (trace_selftest_test_probe1_cnt || + trace_selftest_test_probe2_cnt || + trace_selftest_test_probe3_cnt || + trace_selftest_test_global_cnt || + trace_selftest_test_dyn_cnt) + ret = -1; + + ftrace_enabled = save_ftrace_enabled; + + return ret; +} + /* Test dynamic code modification and ftrace filters */ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, struct trace_array *tr, @@ -166,16 +366,20 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, /* check the trace buffer */ ret = trace_test_buffer(tr, &count); - trace->reset(tr); tracing_start(); /* we should only have one item */ if (!ret && count != 1) { + trace->reset(tr); printk(KERN_CONT ".. 
filter failed count=%ld ..", count); ret = -1; goto out; } + /* Test the ops with global tracing running */ + ret = trace_selftest_ops(1); + trace->reset(tr); + out: ftrace_enabled = save_ftrace_enabled; tracer_enabled = save_tracer_enabled; @@ -183,6 +387,10 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, /* Enable tracing on all functions again */ ftrace_set_global_filter(NULL, 0, 1); + /* Test the ops with global tracing off */ + if (!ret) + ret = trace_selftest_ops(2); + return ret; } #else diff --git a/kernel/trace/trace_selftest_dynamic.c b/kernel/trace/trace_selftest_dynamic.c index 54dd77cce5b..b4c475a0a48 100644 --- a/kernel/trace/trace_selftest_dynamic.c +++ b/kernel/trace/trace_selftest_dynamic.c @@ -5,3 +5,9 @@ int DYN_FTRACE_TEST_NAME(void) /* used to call mcount */ return 0; } + +int DYN_FTRACE_TEST_NAME2(void) +{ + /* used to call mcount */ + return 0; +} -- cgit v1.2.3 From d6971822c288ce5547190c6f812cc978d6520777 Mon Sep 17 00:00:00 2001 From: Michal Marek Date: Tue, 17 May 2011 15:36:46 +0200 Subject: ftrace/kbuild: Add recordmcount files to force full build Modifications to recordmcount must be performed on all object files to stay consistent with what the kernel code may expect. Add the recordmcount files to the main dependencies to make sure any change to them causes a full recompile. Signed-off-by: Michal Marek Link: http://lkml.kernel.org/r/20110517133646.GP13293@sepie.suse.cz Signed-off-by: Steven Rostedt --- scripts/Makefile.build | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index fdca952f6a4..6165622c3e2 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -255,6 +255,8 @@ sub_cmd_record_mcount = \ if [ $(@) != "scripts/mod/empty.o" ]; then \ $(objtree)/scripts/recordmcount $(RECORDMCOUNT_FLAGS) "$(@)"; \ fi; +recordmcount_source := $(srctree)/scripts/recordmcount.c \ + $(srctree)/scripts/recordmcount.h else sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \ "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \ @@ -262,6 +264,7 @@ sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \ "$(LD)" "$(NM)" "$(RM)" "$(MV)" \ "$(if $(part-of-module),1,0)" "$(@)"; +recordmcount_source := $(srctree)/scripts/recordmcount.pl endif cmd_record_mcount = \ if [ "$(findstring -pg,$(_c_flags))" = "-pg" ]; then \ @@ -282,13 +285,13 @@ define rule_cc_o_c endef # Built-in and composite module parts -$(obj)/%.o: $(src)/%.c FORCE +$(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) # Single-part modules are special since we need to mark them in $(MODVERDIR) -$(single-used-m): $(obj)/%.o: $(src)/%.c FORCE +$(single-used-m): $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) @{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod) -- cgit v1.2.3 From 2cba3ffb9a9db3874304a1739002d053d53c738b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 19 May 2011 13:30:56 +0200 Subject: perf stat: Add -d -d and -d -d -d options to show more CPU events Print even more detailed statistics if requested via perf stat -d: -d: detailed events, L1 and LLC data cache -d -d: more detailed events, dTLB and iTLB events -d -d -d: very detailed events, adding prefetch events Full output looks like this now: Performance counter stats for '/home/mingo/hackbench 10' (5 runs): 
1703.674707 task-clock # 8.709 CPUs utilized ( +- 4.19% ) 49,068 context-switches # 0.029 M/sec ( +- 16.66% ) 8,303 CPU-migrations # 0.005 M/sec ( +- 24.90% ) 17,397 page-faults # 0.010 M/sec ( +- 0.46% ) 2,345,389,239 cycles # 1.377 GHz ( +- 4.61% ) [55.90%] 1,884,503,527 stalled-cycles-frontend # 80.35% frontend cycles idle ( +- 5.67% ) [50.39%] 743,919,737 stalled-cycles-backend # 31.72% backend cycles idle ( +- 8.75% ) [49.91%] 1,314,416,379 instructions # 0.56 insns per cycle # 1.43 stalled cycles per insn ( +- 2.53% ) [60.87%] 272,592,567 branches # 160.003 M/sec ( +- 1.74% ) [56.56%] 3,794,846 branch-misses # 1.39% of all branches ( +- 6.59% ) [58.50%] 449,982,778 L1-dcache-loads # 264.125 M/sec ( +- 2.47% ) [49.88%] 22,404,961 L1-dcache-load-misses # 4.98% of all L1-dcache hits ( +- 6.08% ) [55.05%] 6,204,750 LLC-loads # 3.642 M/sec ( +- 8.91% ) [43.75%] 1,837,411 LLC-load-misses # 1.078 M/sec ( +- 7.27% ) [12.07%] 411,440,421 L1-icache-loads # 241.502 M/sec ( +- 5.60% ) [36.52%] 27,556,832 L1-icache-load-misses # 16.175 M/sec ( +- 7.46% ) [46.72%] 464,067,627 dTLB-loads # 272.392 M/sec ( +- 4.46% ) [54.17%] 10,765,648 dTLB-load-misses # 6.319 M/sec ( +- 3.18% ) [48.68%] 1,273,080,386 iTLB-loads # 747.256 M/sec ( +- 3.38% ) [47.53%] 117,481 iTLB-load-misses # 0.069 M/sec ( +- 14.99% ) [47.01%] 4,590,653 L1-dcache-prefetches # 2.695 M/sec ( +- 4.49% ) [46.19%] 1,712,660 L1-dcache-prefetch-misses # 1.005 M/sec ( +- 3.75% ) [44.82%] 0.195622057 seconds time elapsed ( +- 6.84% ) Also clean up the attribute construction code to be appending, and factor it out into add_default_attributes(). Tweak the coverage percentage printout a bit, so that it's easier to view it alongside the +- sttddev colum. Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Steven Rostedt Link: http://lkml.kernel.org/n/tip-to3kgu04449s64062val8b62@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 209 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 154 insertions(+), 55 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 602c3c96fa1..a89fc083536 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -6,24 +6,28 @@ * * Sample output: - $ perf stat ~/hackbench 10 - Time: 0.104 + $ perf stat ./hackbench 10 - Performance counter stats for '/home/mingo/hackbench': + Time: 0.118 - 1255.538611 task clock ticks # 10.143 CPU utilization factor - 54011 context switches # 0.043 M/sec - 385 CPU migrations # 0.000 M/sec - 17755 pagefaults # 0.014 M/sec - 3808323185 CPU cycles # 3033.219 M/sec - 1575111190 instructions # 1254.530 M/sec - 17367895 cache references # 13.833 M/sec - 7674421 cache misses # 6.112 M/sec + Performance counter stats for './hackbench 10': - Wall-clock time elapsed: 123.786620 msecs + 1708.761321 task-clock # 11.037 CPUs utilized + 41,190 context-switches # 0.024 M/sec + 6,735 CPU-migrations # 0.004 M/sec + 17,318 page-faults # 0.010 M/sec + 5,205,202,243 cycles # 3.046 GHz + 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle + 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle + 2,603,501,247 instructions # 0.50 insns per cycle + # 1.48 stalled cycles per insn + 484,357,498 branches # 283.455 M/sec + 6,388,934 branch-misses # 1.32% of all branches + + 0.154822978 seconds time elapsed * - * Copyright (C) 2008, Red Hat Inc, Ingo Molnar + * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar * * Improvements and fixes by: * @@ -75,22 +79,10 @@ 
static struct perf_event_attr default_attrs[] = { }; /* - * Detailed stats: + * Detailed stats (-d), covering the L1 and last level data caches: */ static struct perf_event_attr detailed_attrs[] = { - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS }, - { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS }, - - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, - { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES }, - { .type = PERF_TYPE_HW_CACHE, .config = PERF_COUNT_HW_CACHE_L1D << 0 | @@ -116,6 +108,69 @@ static struct perf_event_attr detailed_attrs[] = { (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, }; +/* + * Very detailed stats (-d -d), covering the instruction cache and the TLB caches: + */ +static struct perf_event_attr very_detailed_attrs[] = { + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_L1I << 0 | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_L1I << 0 | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_DTLB << 0 | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_DTLB << 0 | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_ITLB << 0 | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_ITLB << 0 | + (PERF_COUNT_HW_CACHE_OP_READ << 8) | + (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, + +}; + +/* + * Very, very detailed stats (-d -d -d), adding prefetch events: + */ +static struct perf_event_attr very_very_detailed_attrs[] = { + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_L1D << 0 | + (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | + (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) }, + + { .type = PERF_TYPE_HW_CACHE, + .config = + PERF_COUNT_HW_CACHE_L1D << 0 | + (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | + (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, +}; + + + struct perf_evlist *evsel_list; static bool system_wide = false; @@ -129,7 +184,7 @@ static pid_t target_pid = -1; static pid_t target_tid = -1; static pid_t child_pid = -1; static bool null_run = false; -static bool detailed_run = false; +static int detailed_run = 0; static bool sync_run = false; static bool big_num = true; static int big_num_opt = -1; @@ -464,7 +519,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) { double msecs = avg / 1e6; char cpustr[16] = { '\0', }; - const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-24s"; + const char *fmt = csv_output ? 
"%s%.6f%s%s" : "%s%18.6f%s%-25s"; if (no_aggr) sprintf(cpustr, "CPU%*d%s", @@ -584,9 +639,9 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (csv_output) fmt = "%s%.0f%s%s"; else if (big_num) - fmt = "%s%'18.0f%s%-24s"; + fmt = "%s%'18.0f%s%-25s"; else - fmt = "%s%18.0f%s%-24s"; + fmt = "%s%18.0f%s%-25s"; if (no_aggr) sprintf(cpustr, "CPU%*d%s", @@ -616,7 +671,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (total && avg) { ratio = total / avg; - fprintf(stderr, "\n # %5.2f stalled cycles per insn", ratio); + fprintf(stderr, "\n # %5.2f stalled cycles per insn", ratio); } } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) && @@ -704,7 +759,7 @@ static void print_counter_aggr(struct perf_evsel *counter) avg_enabled = avg_stats(&ps->res_stats[1]); avg_running = avg_stats(&ps->res_stats[2]); - fprintf(stderr, " (%.2f%%)", 100 * avg_running / avg_enabled); + fprintf(stderr, " [%5.2f%%]", 100 * avg_running / avg_enabled); } fprintf(stderr, "\n"); } @@ -854,7 +909,7 @@ static const struct option options[] = { "repeat command and print average + stddev (max: 100)"), OPT_BOOLEAN('n', "null", &null_run, "null run - dont start any counters"), - OPT_BOOLEAN('d', "detailed", &detailed_run, + OPT_INCR('d', "detailed", &detailed_run, "detailed run - start a lot of events"), OPT_BOOLEAN('S', "sync", &sync_run, "call sync() before starting a run"), @@ -873,6 +928,70 @@ static const struct option options[] = { OPT_END() }; +/* + * Add default attributes, if there were no attributes specified or + * if -d/--detailed, -d -d or -d -d -d is used: + */ +static int add_default_attributes(void) +{ + struct perf_evsel *pos; + size_t attr_nr = 0; + size_t c; + + /* Set attrs if no event is selected and !null_run: */ + if (null_run) + return 0; + + if (!evsel_list->nr_entries) { + for (c = 0; c < ARRAY_SIZE(default_attrs); c++) { + pos = perf_evsel__new(default_attrs + c, c + attr_nr); + if (pos == NULL) + return -1; + perf_evlist__add(evsel_list, pos); + } + attr_nr += c; + } + + /* Detailed events get appended to the event list: */ + + if (detailed_run < 1) + return 0; + + /* Append detailed run extra attributes: */ + for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) { + pos = perf_evsel__new(detailed_attrs + c, c + attr_nr); + if (pos == NULL) + return -1; + perf_evlist__add(evsel_list, pos); + } + attr_nr += c; + + if (detailed_run < 2) + return 0; + + /* Append very detailed run extra attributes: */ + for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) { + pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr); + if (pos == NULL) + return -1; + perf_evlist__add(evsel_list, pos); + } + + if (detailed_run < 3) + return 0; + + /* Append very, very detailed run extra attributes: */ + for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) { + pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr); + if (pos == NULL) + return -1; + perf_evlist__add(evsel_list, pos); + } + + + return 0; +} + int cmd_stat(int argc, const char **argv, const char *prefix __used) { struct perf_evsel *pos; @@ -918,28 +1037,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) usage_with_options(stat_usage, options); } - /* Set attrs and nr_counters if no event is selected and !null_run */ - if (detailed_run) { - size_t c; - - for (c = 0; c < ARRAY_SIZE(detailed_attrs); ++c) { - pos = perf_evsel__new(&detailed_attrs[c], c); - if (pos == NULL) - goto out; - perf_evlist__add(evsel_list, pos); - } - } - /* Set attrs and nr_counters 
if no event is selected and !null_run */ - if (!detailed_run && !null_run && !evsel_list->nr_entries) { - size_t c; - - for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) { - pos = perf_evsel__new(&default_attrs[c], c); - if (pos == NULL) - goto out; - perf_evlist__add(evsel_list, pos); - } - } + if (add_default_attributes()) + goto out; if (target_pid != -1) target_tid = target_pid; -- cgit v1.2.3 From c3305257cd4df63e03e21e331a0140ae9c0faccc Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 19 May 2011 14:01:42 +0200 Subject: perf stat: Add more cache-miss percentage printouts Print out the cache-miss percentage as well if the cache refs were collected, for all the generic cache event types. Before: 11,103,723,230 dTLB-loads # 622.471 M/sec ( +- 0.30% ) 87,065,337 dTLB-load-misses # 4.881 M/sec ( +- 0.90% ) After: 11,353,713,242 dTLB-loads # 626.020 M/sec ( +- 0.35% ) 113,393,472 dTLB-load-misses # 1.00% of all dTLB cache hits ( +- 0.49% ) Also ASCII color highlight too high percentages, them when it's executed on the console. Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Steven Rostedt Link: http://lkml.kernel.org/n/tip-lkhwxsevdbd9a8nymx0vxc3y@git.kernel.org Signed-off-by: Ingo Molnar --- tools/perf/builtin-stat.c | 138 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 136 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index a89fc083536..a9f06715e44 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -261,6 +261,10 @@ struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS]; struct stats runtime_branches_stats[MAX_NR_CPUS]; struct stats runtime_cacherefs_stats[MAX_NR_CPUS]; struct stats runtime_l1_dcache_stats[MAX_NR_CPUS]; +struct stats runtime_l1_icache_stats[MAX_NR_CPUS]; +struct stats runtime_ll_cache_stats[MAX_NR_CPUS]; +struct stats runtime_itlb_cache_stats[MAX_NR_CPUS]; +struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS]; struct stats walltime_nsecs_stats; static int create_perf_stat_counter(struct perf_evsel *evsel) @@ -317,6 +321,14 @@ static void update_shadow_stats(struct perf_evsel *counter, u64 *count) update_stats(&runtime_cacherefs_stats[0], count[0]); else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) update_stats(&runtime_l1_dcache_stats[0], count[0]); + else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I)) + update_stats(&runtime_l1_icache_stats[0], count[0]); + else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL)) + update_stats(&runtime_ll_cache_stats[0], count[0]); + else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB)) + update_stats(&runtime_dtlb_cache_stats[0], count[0]); + else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB)) + update_stats(&runtime_itlb_cache_stats[0], count[0]); } /* @@ -630,6 +642,98 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou fprintf(stderr, " of all L1-dcache hits "); } +static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +{ + double total, ratio = 0.0; + const char *color; + + total = avg_stats(&runtime_l1_icache_stats[cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; + + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%6.2f%%", ratio); + fprintf(stderr, " of all L1-icache hits "); +} + +static 
void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +{ + double total, ratio = 0.0; + const char *color; + + total = avg_stats(&runtime_dtlb_cache_stats[cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; + + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%6.2f%%", ratio); + fprintf(stderr, " of all dTLB cache hits "); +} + +static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +{ + double total, ratio = 0.0; + const char *color; + + total = avg_stats(&runtime_itlb_cache_stats[cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; + + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%6.2f%%", ratio); + fprintf(stderr, " of all iTLB cache hits "); +} + +static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +{ + double total, ratio = 0.0; + const char *color; + + total = avg_stats(&runtime_ll_cache_stats[cpu]); + + if (total) + ratio = avg / total * 100.0; + + color = PERF_COLOR_NORMAL; + if (ratio > 20.0) + color = PERF_COLOR_RED; + else if (ratio > 10.0) + color = PERF_COLOR_MAGENTA; + else if (ratio > 5.0) + color = PERF_COLOR_YELLOW; + + fprintf(stderr, " # "); + color_fprintf(stderr, color, "%6.2f%%", ratio); + fprintf(stderr, " of all LL-cache hits "); +} + static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) { double total, ratio = 0.0; @@ -684,6 +788,34 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && runtime_l1_dcache_stats[cpu].n != 0) { print_l1_dcache_misses(cpu, evsel, avg); + } else if ( + evsel->attr.type == PERF_TYPE_HW_CACHE && + evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I | + ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | + ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && + runtime_l1_icache_stats[cpu].n != 0) { + print_l1_icache_misses(cpu, evsel, avg); + } else if ( + evsel->attr.type == PERF_TYPE_HW_CACHE && + evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB | + ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | + ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && + runtime_dtlb_cache_stats[cpu].n != 0) { + print_dtlb_cache_misses(cpu, evsel, avg); + } else if ( + evsel->attr.type == PERF_TYPE_HW_CACHE && + evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB | + ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | + ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && + runtime_itlb_cache_stats[cpu].n != 0) { + print_itlb_cache_misses(cpu, evsel, avg); + } else if ( + evsel->attr.type == PERF_TYPE_HW_CACHE && + evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL | + ((PERF_COUNT_HW_CACHE_OP_READ) << 8) | + ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) && + runtime_ll_cache_stats[cpu].n != 0) { + print_ll_cache_misses(cpu, evsel, avg); } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) && runtime_cacherefs_stats[cpu].n != 0) { total = avg_stats(&runtime_cacherefs_stats[cpu]); @@ -842,10 +974,12 @@ static void print_stat(int argc, const char **argv) } if (!csv_output) { - fprintf(stderr, "\n"); - fprintf(stderr, " %18.9f seconds time elapsed", + if (!null_run) + fprintf(stderr, "\n"); + fprintf(stderr, " %17.9f seconds time elapsed", 
avg_stats(&walltime_nsecs_stats)/1e9); if (run_count > 1) { + fprintf(stderr, " "); print_noise_pct(stddev_stats(&walltime_nsecs_stats), avg_stats(&walltime_nsecs_stats)); } -- cgit v1.2.3
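The four print_*_cache_misses() helpers added in the patch above all apply the same miss-ratio thresholds before printing. Purely as a sketch of that shared logic (the patch itself keeps the four copies separate), the mapping amounts to:

	/*
	 * Hypothetical helper, not part of the patch: ratio is the miss
	 * percentage; the color macros come from perf's util/color.h.
	 */
	static const char *miss_ratio_color(double ratio)
	{
		if (ratio > 20.0)
			return PERF_COLOR_RED;
		if (ratio > 10.0)
			return PERF_COLOR_MAGENTA;
		if (ratio > 5.0)
			return PERF_COLOR_YELLOW;
		return PERF_COLOR_NORMAL;
	}

Each helper would then reduce to computing the ratio and calling color_fprintf(stderr, miss_ratio_color(ratio), "%6.2f%%", ratio).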