X-Git-Url: http://git.cascardo.info/?a=blobdiff_plain;f=kernel%2Ftrace%2Fftrace.c;h=0d88ce9b9fb8828c9a81fdffcd47763ae5cc2543;hb=8c8946f509a494769a8c602b5ed189df01917d39;hp=6d2cb14f9449083c9a2e78f507b9c1255c8e7ca2;hpb=2a88e7e559f2358f4e1422d0b0c0278a74136581;p=cascardo%2Flinux.git

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6d2cb14f9449..0d88ce9b9fb8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1883,7 +1883,6 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 	struct hlist_head *hhd;
 	struct hlist_node *n;
 	unsigned long key;
-	int resched;
 
 	key = hash_long(ip, FTRACE_HASH_BITS);
 
@@ -1897,12 +1896,12 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
 	 * period. This syncs the hash iteration and freeing of items
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_probe_ops __read_mostly =
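
The hunks above swap the tracer-private ftrace_preempt_disable()/ftrace_preempt_enable(resched) pair for the stock preempt_disable_notrace()/preempt_enable_notrace() from <linux/preempt.h>, which is why the local resched flag can be dropped. For orientation only, the removed helpers looked roughly like the following (reconstructed for illustration, not quoted from this diff): they cached need_resched() so that the matching enable could skip the reschedule step when a reschedule was already pending, avoiding recursion into the tracer from the scheduler path.

static inline int ftrace_preempt_disable(void)
{
	int resched = need_resched();

	preempt_disable_notrace();
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

The comment kept in the hunk ("This syncs the hash iteration and freeing of items on the hash") describes the usual pairing of a preempt-disabled read side with a scheduler grace period on the write side. A hypothetical updater sketch, assuming entries are only freed after synchronize_sched() and using <linux/rculist.h> and <linux/slab.h>; the helper name remove_probe_entry() is invented for illustration:

/*
 * Hypothetical updater: unlink the entry, then wait until every CPU
 * has passed through a preemption point. Readers walk the bucket with
 * preemption disabled, so after synchronize_sched() returns, none of
 * them can still hold a pointer to the entry and kfree() is safe.
 */
static void remove_probe_entry(struct ftrace_func_probe *entry)
{
	hlist_del_rcu(&entry->node);
	synchronize_sched();
	kfree(entry);
}

The _notrace variants matter in this context: plain preempt_enable() can end up in traced code such as preempt_schedule(), so calling it from inside a function-trace callback risks recursing into the tracer.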