// SPDX-License-Identifier: GPL-2.0
/*
* Infrastructure for profiling code inserted by 'gcc -pg'.
*
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
* Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
*
* Originally ported from the -rt patch by:
* Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Based on code in the latency_tracer, that is:
*
* Copyright (C) 2004-2006 Ingo Molnar
* Copyright (C) 2004 Nadia Yvette Chambers
*/
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>
#include <trace/events/sched.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"
/* Flags that do not get reset */
#define FTRACE_NOCLEAR_FLAGS (FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
FTRACE_FL_MODIFIED)
#define FTRACE_INVALID_FUNCTION "__ftrace_invalid_address__"
#define FTRACE_WARN_ON(cond) \
({ \
int ___r = cond; \
if (WARN_ON(___r)) \
ftrace_kill(); \
___r; \
})
#define FTRACE_WARN_ON_ONCE(cond) \
({ \
int ___r = cond; \
if (WARN_ON_ONCE(___r)) \
ftrace_kill(); \
___r; \
})
/* hash bits for specific function selection */
#define FTRACE_HASH_MAX_BITS 12
#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname) \
.func_hash = &opsname.local_hash, \
.local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), \
.subop_list = LIST_HEAD_INIT(opsname.subop_list),
#else
#define INIT_OPS_HASH(opsname)
#endif
enum {
FTRACE_MODIFY_ENABLE_FL = (1 << 0),
FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1),
};
struct ftrace_ops ftrace_list_end __read_mostly = {
.func = ftrace_stub,
.flags = FTRACE_OPS_FL_STUB,
INIT_OPS_HASH(ftrace_list_end)
};
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int __maybe_unused last_ftrace_enabled;
/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;
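/* Returns true if @ops filters on PIDs and its trace_array has a pid filter set */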
bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
struct trace_array *tr;
if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
return false;
tr = ops->private;
return tr->function_pids != NULL || tr->function_no_pids != NULL;
}
static void ftrace_update_trampoline(struct ftrace_ops *ops);
/*
* ftrace_disabled is set when an anomaly is discovered.
* ftrace_disabled is much stronger than ftrace_enabled.
*/
static int ftrace_disabled __read_mostly;
DEFINE_MUTEX(ftrace_lock);
struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = (struct ftrace_ops __rcu *)&ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;
/* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
/*
* Stub used to invoke the list ops without requiring a separate trampoline.
*/
const struct ftrace_ops ftrace_list_ops = {
.func = ftrace_ops_list_func,
.flags = FTRACE_OPS_FL_STUB,
};
static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op,
struct ftrace_regs *fregs)
{
/* do nothing */
}
/*
* Stub used when a call site is disabled. May be called transiently by threads
* which have made it into ftrace_caller but haven't yet recovered the ops at
* the point the call site is disabled.
*/
const struct ftrace_ops ftrace_nop_ops = {
.func = ftrace_ops_nop_func,
.flags = FTRACE_OPS_FL_STUB,
};
#endif
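/* Lazily initialize an ops' hash mutex, subop list and func_hash on first use */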
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
mutex_init(&ops->local_hash.regex_lock);
INIT_LIST_HEAD(&ops->subop_list);
ops->func_hash = &ops->local_hash;
ops->flags |= FTRACE_OPS_FL_INITIALIZED;
}
#endif
}
/* Called when a callback filters on set_ftrace_pid */
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
struct trace_array *tr = op->private;
int pid;
if (tr) {
pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
if (pid == FTRACE_PID_IGNORE)
return;
if (pid != FTRACE_PID_TRACE &&
pid != current->pid)
return;
}
op->saved_func(ip, parent_ip, op, fregs);
}
void ftrace_sync_ipi(void *data)
{
/* Probably not needed, but do it anyway */
smp_rmb();
}
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
/*
* If this is a dynamic or RCU ops, or we force list func,
* then it needs to call the list anyway.
*/
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
FTRACE_FORCE_LIST_FUNC)
return ftrace_ops_list_func;
return ftrace_ops_get_func(ops);
}
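/*
* Pick the callback that the arch mcount/fentry code should call, based on
* how many ftrace_ops are currently registered, and publish it safely.
*/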
static void update_ftrace_function(void)
{
ftrace_func_t func;
/*
* Prepare the ftrace_ops that the arch callback will use.
* If there's only one ftrace_ops registered, the ftrace_ops_list
* will point to the ops we want.
*/
set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
lockdep_is_held(&ftrace_lock));
/* If there's no ftrace_ops registered, just call the stub function */
if (set_function_trace_op == &ftrace_list_end) {
func = ftrace_stub;
/*
* If we are at the end of the list and this ops is
* recursion safe and not dynamic and the arch supports passing ops,
* then have the mcount trampoline call the function directly.
*/
} else if (rcu_dereference_protected(ftrace_ops_list->next,
lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
func = ftrace_ops_get_list_func(ftrace_ops_list);
} else {
/* Just use the default ftrace_ops */
set_function_trace_op = &ftrace_list_end;
func = ftrace_ops_list_func;
}
/* If there's no change, then do nothing more here */
if (ftrace_trace_function == func)
return;
/*
* If we are using the list function, it doesn't care
* about the function_trace_ops.
*/
if (func == ftrace_ops_list_func) {
ftrace_trace_function = func;
/*
* Don't even bother setting function_trace_ops,
* it would be racy to do so anyway.
*/
return;
}
#ifndef CONFIG_DYNAMIC_FTRACE
/*
* For static tracing, we need to be a bit more careful.
* The function change takes effect immediately. Thus,
* we need to coordinate the setting of the function_trace_ops
* with the setting of the ftrace_trace_function.
*
* Set the function to the list ops, which will call the
* function we want, albeit indirectly, but it handles the
* ftrace_ops and doesn't depend on function_trace_op.
*/
ftrace_trace_function = ftrace_ops_list_func;
/*
* Make sure all CPUs see this. Yes this is slow, but static
* tracing is slow and nasty to have enabled.
*/
synchronize_rcu_tasks_rude();
/* Now all cpus are using the list ops. */
function_trace_op = set_function_trace_op;
/* Make sure the function_trace_op is visible on all CPUs */
smp_wmb();
/* Nasty way to force a rmb on all cpus */
smp_call_function(ftrace_sync_ipi, NULL, 1);
/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */
ftrace_trace_function = func;
}
static void add_ftrace_ops(struct ftrace_ops __rcu **list,
struct ftrace_ops *ops)
{
rcu_assign_pointer(ops->next, *list);
/*
* We are entering ops into the list but another
* CPU might be walking that list. We need to make sure
* the ops->next pointer is valid before another CPU sees
* the ops pointer included into the list.
*/
rcu_assign_pointer(*list, ops);
}
static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
struct ftrace_ops *ops)
{
struct ftrace_ops **p;
/*
* If we are removing the last function, then simply point
* to the ftrace_stub.
*/
if (rcu_dereference_protected(*list,
lockdep_is_held(&ftrace_lock)) == ops &&
rcu_dereference_protected(ops->next,
lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
rcu_assign_pointer(*list, &ftrace_list_end);
return 0;
}
for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
if (*p == ops)
break;
if (*p != ops)
return -1;
*p = (*p)->next;
return 0;
}
static void ftrace_update_trampoline(struct ftrace_ops *ops);
int __register_ftrace_function(struct ftrace_ops *ops)
{
if (ops->flags & FTRACE_OPS_FL_DELETED)
return -EINVAL;
if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
return -EBUSY;
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
* If the ftrace_ops specifies SAVE_REGS, then it only can be used
* if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
* Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
*/
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
!(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
return -EINVAL;
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
return -EBUSY;
if (!is_kernel_core_data((unsigned long)ops))
ops->flags |= FTRACE_OPS_FL_DYNAMIC;
add_ftrace_ops(&ftrace_ops_list, ops);
/* Always save the function, and reset at unregistering */
ops->saved_func = ops->func;
if (ftrace_pids_enabled(ops))
ops->func = ftrace_pid_func;
ftrace_update_trampoline(ops);
if (ftrace_enabled)
update_ftrace_function();
return 0;
}
int __unregister_ftrace_function(struct ftrace_ops *ops)
{
int ret;
if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
return -EBUSY;
ret = remove_ftrace_ops(&ftrace_ops_list, ops);
if (ret < 0)
return ret;
if (ftrace_enabled)
update_ftrace_function();
ops->func = ops->saved_func;
return 0;
}
static void ftrace_update_pid_func(void)
{
struct ftrace_ops *op;
/* Only do something if we are tracing something */
if (ftrace_trace_function == ftrace_stub)
return;
do_for_each_ftrace_op(op, ftrace_ops_list) {
if (op->flags & FTRACE_OPS_FL_PID) {
op->func = ftrace_pids_enabled(op) ?
ftrace_pid_func : op->saved_func;
ftrace_update_trampoline(op);
}
} while_for_each_ftrace_op(op);
fgraph_update_pid_func();
update_ftrace_function();
}
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
struct hlist_node node;
unsigned long ip;
unsigned long counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned long long time;
unsigned long long time_squared;
#endif
};
struct ftrace_profile_page {
struct ftrace_profile_page *next;
unsigned long index;
struct ftrace_profile records[];
};
struct ftrace_profile_stat {
atomic_t disabled;
struct hlist_head *hash;
struct ftrace_profile_page *pages;
struct ftrace_profile_page *start;
struct tracer_stat stat;
};
#define PROFILE_RECORDS_SIZE \
(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
#define PROFILES_PER_PAGE \
(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
static int ftrace_profile_enabled __read_mostly;
/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);
static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
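/* stat iterator: return the next profile record, crossing page boundaries as needed */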
static void *
function_stat_next(void *v, int idx)
{
struct ftrace_profile *rec = v;
struct ftrace_profile_page *pg;
pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
again:
if (idx != 0)
rec++;
if ((void *)rec >= (void *)&pg->records[pg->index]) {
pg = pg->next;
if (!pg)
return NULL;
rec = &pg->records[0];
if (!rec->counter)
goto again;
}
return rec;
}
static void *function_stat_start(struct tracer_stat *trace)
{
struct ftrace_profile_stat *stat =
container_of(trace, struct ftrace_profile_stat, stat);
if (!stat || !stat->start)
return NULL;
return function_stat_next(&stat->start->records[0], 0);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
const struct ftrace_profile *a = p1;
const struct ftrace_profile *b = p2;
if (a->time < b->time)
return -1;
if (a->time > b->time)
return 1;
else
return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(const void *p1, const void *p2)
{
const struct ftrace_profile *a = p1;
const struct ftrace_profile *b = p2;
if (a->counter < b->counter)
return -1;
if (a->counter > b->counter)
return 1;
else
return 0;
}
#endif
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
seq_puts(m, " Function "
"Hit Time Avg s^2\n"
" -------- "
"--- ---- --- ---\n");
#else
seq_puts(m, " Function Hit\n"
" -------- ---\n");
#endif
return 0;
}
static int function_stat_show(struct seq_file *m, void *v)
{
struct trace_array *tr = trace_get_global_array();
struct ftrace_profile *rec = v;
const char *refsymbol = NULL;
char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static struct trace_seq s;
unsigned long long avg;
unsigned long long stddev;
unsigned long long stddev_denom;
#endif
guard(mutex)(&ftrace_profile_lock);
/* we raced with function_profile_reset() */
if (unlikely(rec->counter == 0))
return -EBUSY;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
avg = div64_ul(rec->time, rec->counter);
if (tracing_thresh && (avg < tracing_thresh))
return 0;
#endif
if (tr->trace_flags & TRACE_ITER(PROF_TEXT_OFFSET)) {
unsigned long offset;
if (core_kernel_text(rec->ip)) {
refsymbol = "_text";
offset = rec->ip - (unsigned long)_text;
} else {
struct module *mod;
guard(rcu)();
mod = __module_text_address(rec->ip);
if (mod) {
refsymbol = mod->name;
/* Calculate offset from module's text entry address. */
offset = rec->ip - (unsigned long)mod->mem[MOD_TEXT].base;
}
}
if (refsymbol)
snprintf(str, sizeof(str), " %s+%#lx", refsymbol, offset);
}
if (!refsymbol)
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
seq_printf(m, " %-30.30s %10lu", str, rec->counter);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
seq_puts(m, " ");
/*
* Variance formula:
* s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
* Maybe Welford's method is better here?
* Divide only by 1000 for ns^2 -> us^2 conversion.
* trace_print_graph_duration will divide by 1000 again.
*/
stddev = 0;
stddev_denom = rec->counter * (rec->counter - 1) * 1000;
if (stddev_denom) {
stddev = rec->counter * rec->time_squared -
rec->time * rec->time;
stddev = div64_ul(stddev, stddev_denom);
}
trace_seq_init(&s);
trace_print_graph_duration(rec->time, &s);
trace_seq_puts(&s, " ");
trace_print_graph_duration(avg, &s);
trace_seq_puts(&s, " ");
trace_print_graph_duration(stddev, &s);
trace_print_seq(m, &s);
#endif
seq_putc(m, '\n');
return 0;
}
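/* Clear all recorded profile data and the hash for this per-CPU stat */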
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
struct ftrace_profile_page *pg;
pg = stat->pages = stat->start;
while (pg) {
memset(pg->records, 0, PROFILE_RECORDS_SIZE);
pg->index = 0;
pg = pg->next;
}
memset(stat->hash, 0,
FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}
static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
struct ftrace_profile_page *pg;
int functions;
int pages;
int i;
/* If we already allocated, do nothing */
if (stat->pages)
return 0;
stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
if (!stat->pages)
return -ENOMEM;
#ifdef CONFIG_DYNAMIC_FTRACE
functions = ftrace_update_tot_cnt;
#else
/*
* We do not know the number of functions that exist because
* dynamic tracing is what counts them. From past experience
* we have around 20K functions. That should be more than enough.
* It is highly unlikely we will execute every function in
* the kernel.
*/
functions = 20000;
#endif
pg = stat->start = stat->pages;
pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
for (i = 1; i < pages; i++) {
pg->next = (void *)get_zeroed_page(GFP_KERNEL);
if (!pg->next)
goto out_free;
pg = pg->next;
}
return 0;
out_free:
pg = stat->start;
while (pg) {
unsigned long tmp = (unsigned long)pg;
pg = pg->next;
free_page(tmp);
}
stat->pages = NULL;
stat->start = NULL;
return -ENOMEM;
}
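/* Allocate (or reset) the profile hash and record pages for one CPU */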
static int ftrace_profile_init_cpu(int cpu)
{
struct ftrace_profile_stat *stat;
int size;
stat = &per_cpu(ftrace_profile_stats, cpu);
if (stat->hash) {
/* If the profile is already created, simply reset it */
ftrace_profile_reset(stat);
return 0;
}
/*
* We are profiling all functions, but usually only a few thousand
* functions are hit. We'll make a hash of 1024 items.
*/
size = FTRACE_PROFILE_HASH_SIZE;
stat->hash = kzalloc_objs(struct hlist_head, size);
if (!stat->hash)
return -ENOMEM;
/* Preallocate the function profiling pages */
if (ftrace_profile_pages_init(stat) < 0) {
kfree(stat->hash);
stat->hash = NULL;
return -ENOMEM;
}
return 0;
}
static int ftrace_profile_init(void)
{
int cpu;
int ret = 0;
for_each_possible_cpu(cpu) {
ret = ftrace_profile_init_cpu(cpu);
if (ret)
break;
}
return ret;
}
/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
struct ftrace_profile *rec;
struct hlist_head *hhd;
unsigned long key;
key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
hhd = &stat->hash[key];
if (hlist_empty(hhd))
return NULL;
hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
if (rec->ip == ip)
return rec;
}
return NULL;
}
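/* Insert a new profile record into the per-CPU hash, keyed by its ip */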
static void ftrace_add_profile(struct ftrace_profile_stat *stat,
struct ftrace_profile *rec)
{
unsigned long key;
key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}
/*
* The memory is already allocated; this simply finds a new record to use.
*/
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
struct ftrace_profile *rec = NULL;
/* prevent recursion (from NMIs) */
if (atomic_inc_return(&stat->disabled) != 1)
goto out;
/*
* Try to find the function again since an NMI
* could have added it
*/
rec = ftrace_find_profiled_func(stat, ip);
if (rec)
goto out;
if (stat->pages->index == PROFILES_PER_PAGE) {
if (!stat->pages->next)
goto out;
stat->pages = stat->pages->next;
}
rec = &stat->pages->records[stat->pages->index++];
rec->ip = ip;
ftrace_add_profile(stat, rec);
out:
atomic_dec(&stat->disabled);
return rec;
}
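/*
* The function tracer callback for the profiler: find (or allocate) the
* record for @ip on this CPU and bump its hit counter.
*/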
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct ftrace_profile_stat *stat;
struct ftrace_profile *rec;
if (!ftrace_profile_enabled)
return;
guard(preempt_notrace)();
stat = this_cpu_ptr(&ftrace_profile_stats);
if (!stat->hash || !ftrace_profile_enabled)
return;
rec = ftrace_find_profiled_func(stat, ip);
if (!rec) {
rec = ftrace_profile_alloc(stat, ip);
if (!rec)
return;
}
rec->counter++;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;
void ftrace_graph_graph_time_control(bool enable)
{
fgraph_graph_time = enable;
}
struct profile_fgraph_data {
unsigned long long calltime;
unsigned long long subtime;
unsigned long long sleeptime;
};
static int profile_graph_entry(struct ftrace_graph_ent *trace,
struct fgraph_ops *gops,
struct ftrace_regs *fregs)
{
struct profile_fgraph_data *profile_data;
function_profile_call(trace->func, 0, NULL, NULL);
/* If function graph is shutting down, ret_stack can be NULL */
if (!current->ret_stack)
return 0;
profile_data = fgraph_reserve_data(gops->idx, sizeof(*profile_data));
if (!profile_data)
return 0;
profile_data->subtime = 0;
profile_data->sleeptime = current->ftrace_sleeptime;
profile_data->calltime = trace_clock_local();
return 1;
}
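/* When set, time a function spends sleeping is not counted in its profile */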
bool fprofile_no_sleep_time;
static void profile_graph_return(struct ftrace_graph_ret *trace,
struct fgraph_ops *gops,
struct ftrace_regs *fregs)
{
struct profile_fgraph_data *profile_data;
struct ftrace_profile_stat *stat;
unsigned long long calltime;
unsigned long long rettime = trace_clock_local();
struct ftrace_profile *rec;
int size;
guard(preempt_notrace)();
stat = this_cpu_ptr(&ftrace_profile_stats);
if (!stat->hash || !ftrace_profile_enabled)
return;
profile_data = fgraph_retrieve_data(gops->idx, &size);
/* If the calltime was zero'd ignore it */
if (!profile_data || !profile_data->calltime)
return;
calltime = rettime - profile_data->calltime;
if (fprofile_no_sleep_time) {
if (current->ftrace_sleeptime)
calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
}
if (!fgraph_graph_time) {
struct profile_fgraph_data *parent_data;
/* Append this call time to the parent time to subtract */
parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);
if (parent_data)
parent_data->subtime += calltime;
if (profile_data->subtime && profile_data->subtime < calltime)
calltime -= profile_data->subtime;
else
calltime = 0;
}
rec = ftrace_find_profiled_func(stat, trace->func);
if (rec) {
rec->time += calltime;
rec->time_squared += calltime * calltime;
}
}
static struct fgraph_ops fprofiler_ops = {
.entryfunc = &profile_graph_entry,
.retfunc = &profile_graph_return,
};
static int register_ftrace_profiler(void)
{
ftrace_ops_set_global_filter(&fprofiler_ops.ops);
return register_ftrace_graph(&fprofiler_ops);
}
static void unregister_ftrace_profiler(void)
{
unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
.func = function_profile_call,
};
static int register_ftrace_profiler(void)
{
ftrace_ops_set_global_filter(&ftrace_profile_ops);
return register_ftrace_function(&ftrace_profile_ops);
}
static void unregister_ftrace_profiler(void)
{
unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
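/* Handle writes to function_profile_enabled: start or stop the profiler */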
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
val = !!val;
guard(mutex)(&ftrace_profile_lock);
if (ftrace_profile_enabled ^ val) {
if (val) {
ret = ftrace_profile_init();
if (ret < 0)
return ret;
ret = register_ftrace_profiler();
if (ret < 0)
return ret;
ftrace_profile_enabled = 1;
} else {
ftrace_profile_enabled = 0;
/*
* unregister_ftrace_profiler calls stop_machine
* so this acts like a synchronize_rcu.
*/
unregister_ftrace_profiler();
}
}
*ppos += cnt;
return cnt;
}
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64]; /* big enough to hold a number */
int r;
r = sprintf(buf, "%u\n", ftrace_profile_enabled);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static const struct file_operations ftrace_profile_fops = {
.open = tracing_open_generic,
.read = ftrace_profile_read,
.write = ftrace_profile_write,
.llseek = default_llseek,
};
/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
.name = "functions",
.stat_start = function_stat_start,
.stat_next = function_stat_next,
.stat_cmp = function_stat_cmp,
.stat_headers = function_stat_headers,
.stat_show = function_stat_show
};
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
struct ftrace_profile_stat *stat;
char *name;
int ret;
int cpu;
for_each_possible_cpu(cpu) {
stat = &per_cpu(ftrace_profile_stats, cpu);
name = kasprintf(GFP_KERNEL, "function%d", cpu);
if (!name) {
/*
* The files created are permanent; if something happens
* we still do not free memory.
*/
WARN(1,
"Could not allocate stat file for cpu %d\n",
cpu);
return;
}
stat->stat = function_stats;
stat->stat.name = name;
ret = register_stat_tracer(&stat->stat);
if (ret) {
WARN(1,
"Could not register function stat for cpu %d\n",
cpu);
kfree(name);
return;
}
}
trace_create_file("function_profile_enabled",
TRACE_MODE_WRITE, d_tracer, NULL,
&ftrace_profile_fops);
}
#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
#ifdef CONFIG_DYNAMIC_FTRACE
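/* The ops currently being unregistered, so its trampoline can still be accounted for */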
static struct ftrace_ops *removed_ops;
/*
* Set when doing a global update, like enabling all recs or disabling them.
* It is not set when just updating a single ftrace_ops.
*/
static bool update_all_ops;
struct ftrace_func_probe {
struct ftrace_probe_ops *probe_ops;
struct ftrace_ops ops;
struct trace_array *tr;
struct list_head list;
void *data;
int ref;
};
/*
* We make these constant because no one should touch them,
* but they are used as the default "empty hash", to avoid allocating
* it all the time. These are in a read only section such that if
* anyone does try to modify it, it will cause an exception.
*/
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
struct ftrace_ops global_ops = {
.func = ftrace_stub,
.local_hash.notrace_hash = EMPTY_HASH,
.local_hash.filter_hash = EMPTY_HASH,
INIT_OPS_HASH(global_ops)
.flags = FTRACE_OPS_FL_INITIALIZED |
FTRACE_OPS_FL_PID,
};
/*
* Used by the stack unwinder to know about dynamic ftrace trampolines.
*/
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
struct ftrace_ops *op = NULL;
/*
* Some of the ops may be dynamically allocated,
* they are freed after a synchronize_rcu().
*/
preempt_disable_notrace();
do_for_each_ftrace_op(op, ftrace_ops_list) {
/*
* This is to check for dynamically allocated trampolines.
* Trampolines that are in kernel text will have
* core_kernel_text() return true.
*/
if (op->trampoline && op->trampoline_size)
if (addr >= op->trampoline &&
addr < op->trampoline + op->trampoline_size) {
preempt_enable_notrace();
return op;
}
} while_for_each_ftrace_op(op);
preempt_enable_notrace();
return NULL;
}
/*
* This is used by __kernel_text_address() to return true if the
* address is on a dynamically allocated trampoline that would
* not return true for either core_kernel_text() or
* is_module_text_address().
*/
bool is_ftrace_trampoline(unsigned long addr)
{
return ftrace_ops_trampoline(addr) != NULL;
}
struct ftrace_page {
struct ftrace_page *next;
struct dyn_ftrace *records;
int index;
int order;
};
#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE_GROUP(order) ((PAGE_SIZE << (order)) / ENTRY_SIZE)
static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;
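/* Compute the bucket index for @ip; a hash with 0 size_bits has a single bucket */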
static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
if (hash->size_bits > 0)
return hash_long(ip, hash->size_bits);
return 0;
}
/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
unsigned long key;
struct ftrace_func_entry *entry;
struct hlist_head *hhd;
key = ftrace_hash_key(hash, ip);
hhd = &hash->buckets[key];
hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
if (entry->ip == ip)
return entry;
}
return NULL;
}
/**
* ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
* @hash: The hash to look at
* @ip: The instruction pointer to test
*
* Search a given @hash to see if a given instruction pointer (@ip)
* exists in it.
*
* Returns: the entry that holds the @ip if found. NULL otherwise.
*/
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
if (ftrace_hash_empty(hash))
return NULL;
return __ftrace_lookup_ip(hash, ip);
}
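/* Link @entry into the bucket for its ip and bump the hash count */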
static void __add_hash_entry(struct ftrace_hash *hash,
struct ftrace_func_entry *entry)
{
struct hlist_head *hhd;
unsigned long key;
key = ftrace_hash_key(hash, entry->ip);
hhd = &hash->buckets[key];
hlist_add_head(&entry->hlist, hhd);
hash->count++;
}
struct ftrace_func_entry *
add_ftrace_hash_entry_direct(struct ftrace_hash *hash, unsigned long ip, unsigned long direct)
{
struct ftrace_func_entry *entry;
entry = kmalloc_obj(*entry);
if (!entry)
return NULL;
entry->ip = ip;
entry->direct = direct;
__add_hash_entry(hash, entry);
return entry;
}
static struct ftrace_func_entry *
add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
return add_ftrace_hash_entry_direct(hash, ip, 0);
}
static void
free_hash_entry(struct ftrace_hash *hash,
struct ftrace_func_entry *entry)
{
hlist_del(&entry->hlist);
kfree(entry);
hash->count--;
}
static void
remove_hash_entry(struct ftrace_hash *hash,
struct ftrace_func_entry *entry)
{
hlist_del_rcu(&entry->hlist);
hash->count--;
}
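/* Remove and free every entry in @hash (the bucket array itself is kept) */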
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
struct hlist_head *hhd;
struct hlist_node *tn;
struct ftrace_func_entry *entry;
int size = 1 << hash->size_bits;
int i;
if (!hash->count)
return;
for (i = 0; i < size; i++) {
hhd = &hash->buckets[i];
hlist_for_each_entry_safe(entry, tn, hhd, hlist)
free_hash_entry(hash, entry);
}
FTRACE_WARN_ON(hash->count);
}
static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
list_del(&ftrace_mod->list);
kfree(ftrace_mod->module);
kfree(ftrace_mod->func);
kfree(ftrace_mod);
}
static void clear_ftrace_mod_list(struct list_head *head)
{
struct ftrace_mod_load *p, *n;
/* stack tracer isn't supported yet */
if (!head)
return;
mutex_lock(&ftrace_lock);
list_for_each_entry_safe(p, n, head, list)
free_ftrace_mod(p);
mutex_unlock(&ftrace_lock);
}
void free_ftrace_hash(struct ftrace_hash *hash)
{
if (!hash || hash == EMPTY_HASH)
return;
ftrace_hash_clear(hash);
kfree(hash->buckets);
kfree(hash);
}
static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
struct ftrace_hash *hash;
hash = container_of(rcu, struct ftrace_hash, rcu);
free_ftrace_hash(hash);
}
static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
if (!hash || hash == EMPTY_HASH)
return;
call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}
/**
* ftrace_free_filter - remove all filters for an ftrace_ops
* @ops: the ops to remove the filters from
*/
void ftrace_free_filter(struct ftrace_ops *ops)
{
ftrace_ops_init(ops);
if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
return;
free_ftrace_hash(ops->func_hash->filter_hash);
free_ftrace_hash(ops->func_hash->notrace_hash);
ops->func_hash->filter_hash = EMPTY_HASH;
ops->func_hash->notrace_hash = EMPTY_HASH;
}
EXPORT_SYMBOL_GPL(ftrace_free_filter);
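/* Allocate an empty ftrace_hash with 1 << size_bits buckets */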
struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
struct ftrace_hash *hash;
int size;
hash = kzalloc_obj(*hash);
if (!hash)
return NULL;
size = 1 << size_bits;
hash->buckets = kzalloc_objs(*hash->buckets, size);
if (!hash->buckets) {
kfree(hash);
return NULL;
}
hash->size_bits = size_bits;
return hash;
}
/* Used to save filters on functions for modules not loaded yet */
static int ftrace_add_mod(struct trace_array *tr,
const char *func, const char *module,
int enable)
{
struct ftrace_mod_load *ftrace_mod;
struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
ftrace_mod = kzalloc_obj(*ftrace_mod);
if (!ftrace_mod)
return -ENOMEM;
INIT_LIST_HEAD(&ftrace_mod->list);
ftrace_mod->func = kstrdup(func, GFP_KERNEL);
ftrace_mod->module = kstrdup(module, GFP_KERNEL);
ftrace_mod->enable = enable;
if (!ftrace_mod->func || !ftrace_mod->module)
goto out_free;
list_add(&ftrace_mod->list, mod_head);
return 0;
out_free:
free_ftrace_mod(ftrace_mod);
return -ENOMEM;
}
static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
struct ftrace_func_entry *entry;
struct ftrace_hash *new_hash;
int size;
int i;
new_hash = alloc_ftrace_hash(size_bits);
if (!new_hash)
return NULL;
if (hash)
new_hash->flags = hash->flags;
/* Empty hash? */
if (ftrace_hash_empty(hash))
return new_hash;
size = 1 << hash->size_bits;
for (i = 0; i < size; i++) {
hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
if (add_ftrace_hash_entry_direct(new_hash, entry->ip, entry->direct) == NULL)
goto free_hash;
}
}
FTRACE_WARN_ON(new_hash->count != hash->count);
return new_hash;
free_hash:
free_ftrace_hash(new_hash);
return NULL;
}
static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops);
static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops);
static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
struct ftrace_hash *new_hash);
/*
* Allocate a new hash and remove entries from @src and move them to the new hash.
* On success, the @src hash will be empty and should be freed.
*/
static struct ftrace_hash *__move_hash(struct ftrace_hash *src, int size)
{
struct ftrace_func_entry *entry;
struct ftrace_hash *new_hash;
struct hlist_head *hhd;
struct hlist_node *tn;
int bits = 0;
int i;
/*
* Use around half the size (max bit of it), but
* a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
*/
bits = fls(size / 2);
/* Don't allocate too much */
if (bits > FTRACE_HASH_MAX_BITS)
bits = FTRACE_HASH_MAX_BITS;
new_hash = alloc_ftrace_hash(bits);
if (!new_hash)
return NULL;
new_hash->flags = src->flags;
size = 1 << src->size_bits;
for (i = 0; i < size; i++) {
hhd = &src->buckets[i];
hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
remove_hash_entry(src, entry);
__add_hash_entry(new_hash, entry);
}
}
return new_hash;
}
/* Move the @src entries to a newly allocated hash */
static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
int size = src->count;
/*
* If the new source is empty, just return the empty_hash.
*/
if (ftrace_hash_empty(src))
return EMPTY_HASH;
return __move_hash(src, size);
}
/**
* ftrace_hash_move - move a new hash to a filter and do updates
* @ops: The ops with the hash that @dst points to
* @enable: True if for the filter hash, false for the notrace hash
* @dst: Points to the @ops hash that should be updated
* @src: The hash to update @dst with
*
* This is called when an ftrace_ops hash is being updated and the
* kernel needs to reflect this. Note, this only updates the kernel
* function callbacks if the @ops is enabled (not to be confused with
* @enable above). If the @ops is enabled, its hash determines what
* callbacks get called. This function gets called when the @ops hash
* is updated and it requires new callbacks.
*
* On success the elements of @src are moved to @dst, and @dst is updated
* properly, as well as the functions determined by the @ops hashes
* are now calling the @ops callback function.
*
* Regardless of return type, @src should be freed with free_ftrace_hash().
*/
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
struct ftrace_hash **dst, struct ftrace_hash *src)
{
struct ftrace_hash *new_hash;
int ret;
/* Reject setting notrace hash on IPMODIFY ftrace_ops */
if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
return -EINVAL;
new_hash = __ftrace_hash_move(src);
if (!new_hash)
return -ENOMEM;
/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
if (enable) {
/* IPMODIFY should be updated only when filter_hash updating */
ret = ftrace_hash_ipmodify_update(ops, new_hash);
if (ret < 0) {
free_ftrace_hash(new_hash);
return ret;
}
}
/*
* Remove the current set, update the hash and add
* them back.
*/
ftrace_hash_rec_disable_modify(ops);
rcu_assign_pointer(*dst, new_hash);
ftrace_hash_rec_enable_modify(ops);
return 0;
}
static bool hash_contains_ip(unsigned long ip,
struct ftrace_ops_hash *hash)
{
/*
* The function record is a match if it exists in the filter
* hash and not in the notrace hash. Note, an empty hash is
* considered a match for the filter hash, but an empty
* notrace hash is considered not in the notrace hash.
*/
return (ftrace_hash_empty(hash->filter_hash) ||
__ftrace_lookup_ip(hash->filter_hash, ip)) &&
(ftrace_hash_empty(hash->notrace_hash) ||
!__ftrace_lookup_ip(hash->notrace_hash, ip));
}
/*
* Test the hashes for this ops to see if we want to call
* the ops->func or not.
*
* It's a match if the ip is in the ops->filter_hash or
* the filter_hash does not exist or is empty,
* AND
* the ip is not in the ops->notrace_hash.
*
* This needs to be called with preemption disabled as
* the hashes are freed with call_rcu().
*/
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
struct ftrace_ops_hash hash;
int ret;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
* There's a small race when adding ops that the ftrace handler
* that wants regs, may be called without them. We can not
* allow that handler to be called if regs is NULL.
*/
if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
return 0;
#endif
rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
if (hash_contains_ip(ip, &hash))
ret = 1;
else
ret = 0;
return ret;
}
/*
* This is a double for. Do not use 'break' to break out of the loop,
* you must use a goto.
*/
#define do_for_each_ftrace_rec(pg, rec) \
for (pg = ftrace_pages_start; pg; pg = pg->next) { \
int _____i; \
for (_____i = 0; _____i < pg->index; _____i++) { \
rec = &pg->records[_____i];
#define while_for_each_ftrace_rec() \
} \
}
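/*
* Comparator for lookup_rec(): the search key overloads ->flags to hold the
* end of the range, so a record matches if [key->ip, key->flags] overlaps
* the MCOUNT_INSN_SIZE bytes starting at rec->ip.
*/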
static int ftrace_cmp_recs(const void *a, const void *b)
{
const struct dyn_ftrace *key = a;
const struct dyn_ftrace *rec = b;
if (key->flags < rec->ip)
return -1;
if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
return 1;
return 0;
}
static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
struct ftrace_page *pg;
struct dyn_ftrace *rec = NULL;
struct dyn_ftrace key;
key.ip = start;
key.flags = end; /* overload flags, as it is unsigned long */
for (pg = ftrace_pages_start; pg; pg = pg->next) {
if (pg->index == 0 ||
end < pg->records[0].ip ||
start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
continue;
rec = bsearch(&key, pg->records, pg->index,
sizeof(struct dyn_ftrace),
ftrace_cmp_recs);
if (rec)
break;
}
return rec;
}
/**
* ftrace_location_range - return the first address of a traced location
* if it touches the given ip range
* @start: start of range to search.
* @end: end of range to search (inclusive). @end points to the last byte
* to check.
*
* Returns: rec->ip if the related ftrace location is at least partly within
* the given address range. That is, the first address of the instruction
* that is either a NOP or call to the function tracer. It checks the ftrace
* internal tables to determine if the address belongs or not.
*/
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
struct dyn_ftrace *rec;
unsigned long ip = 0;
rcu_read_lock();
rec = lookup_rec(start, end);
if (rec)
ip = rec->ip;
rcu_read_unlock();
return ip;
}
/**
* ftrace_location - return the ftrace location
* @ip: the instruction pointer to check
*
* Returns:
* * If @ip matches the ftrace location, return @ip.
* * If @ip matches sym+0, return sym's ftrace location.
* * Otherwise, return 0.
*/
unsigned long ftrace_location(unsigned long ip)
{
unsigned long loc;
unsigned long offset;
unsigned long size;
loc = ftrace_location_range(ip, ip);
if (!loc) {
if (!kallsyms_lookup_size_offset(ip, &size, &offset))
return 0;
/* map sym+0 to __fentry__ */
if (!offset)
loc = ftrace_location_range(ip, ip + size - 1);
}
return loc;
}
/**
* ftrace_text_reserved - return true if range contains an ftrace location
* @start: start of range to search
* @end: end of range to search (inclusive). @end points to the last byte to check.
*
* Returns: 1 if the range from @start to @end contains an ftrace location.
* That is, the instruction that is either a NOP or call to
* the function tracer. It checks the ftrace internal tables to
* determine if the address belongs or not.
*/
int ftrace_text_reserved(const void *start, const void *end)
{
unsigned long ret;
ret = ftrace_location_range((unsigned long)start,
(unsigned long)end);
return (int)!!ret;
}
/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
struct ftrace_ops *ops;
bool keep_regs = false;
for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next) {
/* pass rec in as regs to have non-NULL val */
if (ftrace_ops_test(ops, rec->ip, rec)) {
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
keep_regs = true;
break;
}
}
}
return keep_regs;
}
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
static bool skip_record(struct dyn_ftrace *rec)
{
/*
* At boot up, weak functions are set to be disabled. Function tracing
* can be enabled before they are, and they still need to be disabled now.
* If the record is disabled, still continue if it is marked as already
* enabled (this is needed to keep the accounting working).
*/
return rec->flags & FTRACE_FL_DISABLED &&
!(rec->flags & FTRACE_FL_ENABLED);
}
/*
* This is the main engine to the ftrace updates to the dyn_ftrace records.
*
* It will iterate through all the available ftrace functions
* (the ones that ftrace can have callbacks to) and set the flags
* in the associated dyn_ftrace records.
*
* @inc: If true, the functions associated to @ops are added to
* the dyn_ftrace records, otherwise they are removed.
*/
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
bool inc)
{
struct ftrace_hash *hash;
struct ftrace_hash *notrace_hash;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
bool update = false;
int count = 0;
int all = false;
/* Only update if the ops has been registered */
if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
return false;
/*
* If the count is zero, we update all records.
* Otherwise we just update the items in the hash.
*/
hash = ops->func_hash->filter_hash;
notrace_hash = ops->func_hash->notrace_hash;
if (ftrace_hash_empty(hash))
all = true;
do_for_each_ftrace_rec(pg, rec) {
int in_notrace_hash = 0;
int in_hash = 0;
int match = 0;
if (skip_record(rec))
continue;
if (all) {
/*
* Only the filter_hash affects all records.
* Update if the record is not in the notrace hash.
*/
if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip))
match = 1;
} else {
in_hash = !!ftrace_lookup_ip(hash, rec->ip);
in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip);
/*
* We want to match all functions that are in the hash but
* not in the other hash.
*/
if (in_hash && !in_notrace_hash)
match = 1;
}
if (!match)
continue;
if (inc) {
rec->flags++;
if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
return false;
if (ops->flags & FTRACE_OPS_FL_DIRECT)
rec->flags |= FTRACE_FL_DIRECT;
/*
* If there's only a single callback registered to a
* function, and the ops has a trampoline registered
* for it, then we can call it directly.
*/
if (ftrace_rec_count(rec) == 1 && ops->trampoline)
rec->flags |= FTRACE_FL_TRAMP;
else
/*
* If we are adding another function callback
* to this function, and the previous had a
* custom trampoline in use, then we need to go
* back to the default trampoline.
*/
rec->flags &= ~FTRACE_FL_TRAMP;
/*
* If any ops wants regs saved for this function
* then all ops will get saved regs.
*/
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
rec->flags |= FTRACE_FL_REGS;
} else {
if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
return false;
rec->flags--;
/*
* Only the internal direct_ops should have the
* DIRECT flag set. Thus, if it is removing a
* function, then that function should no longer
* be direct.
*/
if (ops->flags & FTRACE_OPS_FL_DIRECT)
rec->flags &= ~FTRACE_FL_DIRECT;
/*
* If the rec had REGS enabled and the ops that is
* being removed had REGS set, then see if there is
* still any ops for this record that wants regs.
* If not, we can stop recording them.
*/
if (ftrace_rec_count(rec) > 0 &&
rec->flags & FTRACE_FL_REGS &&
ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
if (!test_rec_ops_needs_regs(rec))
rec->flags &= ~FTRACE_FL_REGS;
}
/*
* The TRAMP needs to be set only if rec count
* is decremented to one, and the ops that is
* left has a trampoline. As TRAMP can only be
* enabled if there is only a single ops attached
* to it.
*/
if (ftrace_rec_count(rec) == 1 &&
ftrace_find_tramp_ops_any_other(rec, ops))
rec->flags |= FTRACE_FL_TRAMP;
else
rec->flags &= ~FTRACE_FL_TRAMP;
/*
* flags will be cleared in ftrace_check_record()
* if rec count is zero.