Merge branch 'master' of git://1984.lsi.us.es/nf
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 41473b4..ab25b88 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -111,6 +111,26 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif
 
+/*
+ * Traverse the ftrace_global_list, invoking all entries.  The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_global_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list)                        \
+       op = rcu_dereference_raw(list);                 \
+       do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op)                           \
+       while (likely(op = rcu_dereference_raw((op)->next)) &&  \
+              unlikely((op) != &ftrace_list_end))
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
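
For review convenience: applied to the global list, the new macro pair expands the
loop in ftrace_global_list_func() (next hunk) to roughly the following. The do/while
split means the body runs once before the termination test, keeping the common
single-entry case cheap; and since the sentinel's callback is ftrace_stub, executing
the body once on an otherwise empty list is harmless.

    op = rcu_dereference_raw(ftrace_global_list);
    do {
            op->func(ip, parent_ip, op, regs);
    } while (likely(op = rcu_dereference_raw((op)->next)) &&
             unlikely((op) != &ftrace_list_end));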
@@ -132,29 +152,21 @@ int ftrace_nr_registered_ops(void)
        return cnt;
 }
 
-/*
- * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
 static void
 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
 {
-       if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+       int bit;
+
+       bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
+       if (bit < 0)
                return;
 
-       trace_recursion_set(TRACE_GLOBAL_BIT);
-       op = rcu_dereference_raw(ftrace_global_list); /*see above*/
-       while (op != &ftrace_list_end) {
+       do_for_each_ftrace_op(op, ftrace_global_list) {
                op->func(ip, parent_ip, op, regs);
-               op = rcu_dereference_raw(op->next); /*see above*/
-       };
-       trace_recursion_clear(TRACE_GLOBAL_BIT);
+       } while_for_each_ftrace_op(op);
+
+       trace_clear_recursion(bit);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
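
The single TRACE_GLOBAL_BIT above is replaced by a window of bits,
TRACE_GLOBAL_START..TRACE_GLOBAL_MAX, so one level of legitimate nesting per context
no longer reports as recursion. A deliberately simplified model of the helpers
(sketch only; the real ones live in kernel/trace/trace.h, keep the bits in
current->trace_recursion, and pick the bit from the current interrupt context rather
than scanning):

    #include <linux/bitops.h>

    static unsigned long recursion_flags;   /* per-task in the real code */

    /* Claim a free bit in start..max; a negative return means every
     * level is already in use, i.e. the tracer is recursing. */
    static inline int test_and_set_recursion(int start, int max)
    {
            int bit;

            for (bit = start; bit <= max; bit++)
                    if (!test_and_set_bit(bit, &recursion_flags))
                            return bit;
            return -1;
    }

    static inline void clear_recursion(int bit)
    {
            clear_bit(bit, &recursion_flags);
    }

Callers bracket the traversal exactly as ftrace_global_list_func() does above: bail
out on a negative bit, clear the bit when the walk completes.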
@@ -221,10 +233,23 @@ static void update_global_ops(void)
         * registered callers.
         */
        if (ftrace_global_list == &ftrace_list_end ||
-           ftrace_global_list->next == &ftrace_list_end)
+           ftrace_global_list->next == &ftrace_list_end) {
                func = ftrace_global_list->func;
-       else
+               /*
+                * Since we are calling the function directly,
+                * if it does not have recursion protection,
+                * the function_trace_op needs to be updated
+                * accordingly.
+                */
+               if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
+                       global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
+               else
+                       global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
+       } else {
                func = ftrace_global_list_func;
+               /* The list has its own recursion protection. */
+               global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
+       }
 
        /* If we filter on pids, update to use the pid function */
        if (!list_empty(&ftrace_pids)) {
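
When only one ops is registered, its func is called directly, so global_ops (which
is what the arch sees as function_trace_op) must advertise that callback's recursion
safety. The flag copy in the single-entry branch is equivalent to the usual
copy-one-flag masking idiom, shown here only for comparison:

    global_ops.flags = (global_ops.flags & ~FTRACE_OPS_FL_RECURSION_SAFE) |
                       (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE);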
@@ -337,7 +363,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
                return -EINVAL;
 
-#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
@@ -736,7 +762,6 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 {
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
-       struct hlist_node *n;
        unsigned long key;
 
        key = hash_long(ip, ftrace_profile_bits);
@@ -745,7 +770,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
        if (hlist_empty(hhd))
                return NULL;
 
-       hlist_for_each_entry_rcu(rec, n, hhd, node) {
+       hlist_for_each_entry_rcu(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }
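
This hunk (and the similar hlist hunks below) tracks the tree-wide hlist iterator
change that dropped the separate struct hlist_node cursor: the macro now derives the
typed entry from its embedded node on each step (via hlist_entry_safe()), so the
extra variable was dead weight. Side by side, using this function's types:

    /* old: position carried in a spare cursor 'n' */
    struct hlist_node *n;
    hlist_for_each_entry_rcu(rec, n, hhd, node) {
            if (rec->ip == ip)
                    return rec;
    }

    /* new: position carried in 'rec' itself */
    hlist_for_each_entry_rcu(rec, hhd, node) {
            if (rec->ip == ip)
                    return rec;
    }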
@@ -1107,7 +1132,6 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;
-       struct hlist_node *n;
 
        if (ftrace_hash_empty(hash))
                return NULL;
@@ -1119,7 +1143,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
        hhd = &hash->buckets[key];
 
-       hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
+       hlist_for_each_entry_rcu(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
@@ -1176,7 +1200,7 @@ remove_hash_entry(struct ftrace_hash *hash,
 static void ftrace_hash_clear(struct ftrace_hash *hash)
 {
        struct hlist_head *hhd;
-       struct hlist_node *tp, *tn;
+       struct hlist_node *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;
@@ -1186,7 +1210,7 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
 
        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
-               hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
+               hlist_for_each_entry_safe(entry, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
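
The _safe iterators keep one cursor by design: tn latches ->next before the body
runs so the current entry may be freed mid-walk; only the now-redundant position
cursor (tp) is gone. The surviving shape:

    struct ftrace_func_entry *entry;
    struct hlist_node *tn;          /* preserves ->next across the free */

    hlist_for_each_entry_safe(entry, tn, hhd, hlist)
            free_hash_entry(hash, entry);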
@@ -1249,7 +1273,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 {
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
-       struct hlist_node *tp;
        int size;
        int ret;
        int i;
@@ -1264,7 +1287,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 
        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
-               hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
+               hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
@@ -1290,7 +1313,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
 {
        struct ftrace_func_entry *entry;
-       struct hlist_node *tp, *tn;
+       struct hlist_node *tn;
        struct hlist_head *hhd;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *new_hash;
@@ -1336,7 +1359,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
-               hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
+               hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                        if (bits > 0)
                                key = hash_long(entry->ip, bits);
                        else
@@ -2875,7 +2898,6 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 {
        struct ftrace_func_probe *entry;
        struct hlist_head *hhd;
-       struct hlist_node *n;
        unsigned long key;
 
        key = hash_long(ip, FTRACE_HASH_BITS);
@@ -2891,7 +2913,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
         * on the hash. rcu_read_lock is too dangerous here.
         */
        preempt_disable_notrace();
-       hlist_for_each_entry_rcu(entry, n, hhd, node) {
+       hlist_for_each_entry_rcu(entry, hhd, node) {
                if (entry->ip == ip)
                        entry->ops->func(ip, parent_ip, &entry->data);
        }
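
Context for the preempt_disable_notrace() pair above: rcu_read_lock() can itself be
traced and so recurse back in here, while disabling preemption is already a
legitimate RCU-sched read-side critical section. The updater side it pairs with
would look roughly like this (free path hypothetical, not part of this hunk):

    hlist_del_rcu(&entry->node);    /* unpublish from the hash */
    synchronize_sched();            /* wait out preempt-disabled walkers */
    kfree(entry);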
@@ -3042,7 +3064,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                  void *data, int flags)
 {
        struct ftrace_func_probe *entry;
-       struct hlist_node *n, *tmp;
+       struct hlist_node *tmp;
        char str[KSYM_SYMBOL_LEN];
        int type = MATCH_FULL;
        int i, len = 0;
@@ -3065,7 +3087,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
 
-               hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+               hlist_for_each_entry_safe(entry, tmp, hhd, node) {
 
                        /* break up if statements for readability */
                        if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
@@ -3970,37 +3992,51 @@ static void ftrace_init_module(struct module *mod,
        ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify(struct notifier_block *self,
-                               unsigned long val, void *data)
+static int ftrace_module_notify_enter(struct notifier_block *self,
+                                     unsigned long val, void *data)
 {
        struct module *mod = data;
 
-       switch (val) {
-       case MODULE_STATE_COMING:
+       if (val == MODULE_STATE_COMING)
                ftrace_init_module(mod, mod->ftrace_callsites,
                                   mod->ftrace_callsites +
                                   mod->num_ftrace_callsites);
-               break;
-       case MODULE_STATE_GOING:
+       return 0;
+}
+
+static int ftrace_module_notify_exit(struct notifier_block *self,
+                                    unsigned long val, void *data)
+{
+       struct module *mod = data;
+
+       if (val == MODULE_STATE_GOING)
                ftrace_release_mod(mod);
-               break;
-       }
 
        return 0;
 }
 #else
-static int ftrace_module_notify(struct notifier_block *self,
-                               unsigned long val, void *data)
+static int ftrace_module_notify_enter(struct notifier_block *self,
+                                     unsigned long val, void *data)
+{
+       return 0;
+}
+static int ftrace_module_notify_exit(struct notifier_block *self,
+                                    unsigned long val, void *data)
 {
        return 0;
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_nb = {
-       .notifier_call = ftrace_module_notify,
+struct notifier_block ftrace_module_enter_nb = {
+       .notifier_call = ftrace_module_notify_enter,
        .priority = INT_MAX,    /* Run before anything that can use kprobes */
 };
 
+struct notifier_block ftrace_module_exit_nb = {
+       .notifier_call = ftrace_module_notify_exit,
+       .priority = INT_MIN,    /* Run after anything that can remove kprobes */
+};
+
 extern unsigned long __start_mcount_loc[];
 extern unsigned long __stop_mcount_loc[];
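
Splitting the single notifier in two pins ftrace to both ends of the module notifier
chain: callsite initialization (INT_MAX) runs before any other COMING handler, and
release (INT_MIN) runs after every GOING handler. A notifier registered at the
default priority 0, kprobes-style, is therefore bracketed on both paths; a minimal
sketch of such a user (names hypothetical):

    static int my_module_cb(struct notifier_block *nb,
                            unsigned long val, void *data)
    {
            struct module *mod = data;

            if (val == MODULE_STATE_COMING)
                    pr_info("%s: ftrace callsites already set up\n", mod->name);
            return 0;
    }

    static struct notifier_block my_module_nb = {
            .notifier_call  = my_module_cb,
            .priority       = 0,    /* between ftrace's INT_MAX and INT_MIN */
    };

It would be registered as usual with register_module_notifier(&my_module_nb).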
 
@@ -4032,9 +4068,13 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
-       ret = register_module_notifier(&ftrace_module_nb);
+       ret = register_module_notifier(&ftrace_module_enter_nb);
        if (ret)
-               pr_warning("Failed to register trace ftrace module notifier\n");
+               pr_warning("Failed to register trace ftrace module enter notifier\n");
+
+       ret = register_module_notifier(&ftrace_module_exit_nb);
+       if (ret)
+               pr_warning("Failed to register trace ftrace module exit notifier\n");
 
        set_ftrace_early_filters();
 
@@ -4090,14 +4130,11 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
         */
        preempt_disable_notrace();
        trace_recursion_set(TRACE_CONTROL_BIT);
-       op = rcu_dereference_raw(ftrace_control_list);
-       while (op != &ftrace_list_end) {
+       do_for_each_ftrace_op(op, ftrace_control_list) {
                if (!ftrace_function_local_disabled(op) &&
                    ftrace_ops_test(op, ip))
                        op->func(ip, parent_ip, op, regs);
-
-               op = rcu_dereference_raw(op->next);
-       };
+       } while_for_each_ftrace_op(op);
        trace_recursion_clear(TRACE_CONTROL_BIT);
        preempt_enable_notrace();
 }
@@ -4112,27 +4149,26 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
        struct ftrace_ops *op;
+       int bit;
 
        if (function_trace_stop)
                return;
 
-       if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+       bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+       if (bit < 0)
                return;
 
-       trace_recursion_set(TRACE_INTERNAL_BIT);
        /*
         * Some of the ops may be dynamically allocated;
         * they must be freed after a synchronize_sched().
         */
        preempt_disable_notrace();
-       op = rcu_dereference_raw(ftrace_ops_list);
-       while (op != &ftrace_list_end) {
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
                if (ftrace_ops_test(op, ip))
                        op->func(ip, parent_ip, op, regs);
-               op = rcu_dereference_raw(op->next);
-       };
+       } while_for_each_ftrace_op(op);
        preempt_enable_notrace();
-       trace_recursion_clear(TRACE_INTERNAL_BIT);
+       trace_clear_recursion(bit);
 }
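
The synchronize_sched() remark in the comment above is the other half of the
preempt_disable_notrace() bracket: a dynamically allocated ftrace_ops may only be
freed once every walker that might still reference it has been scheduled out. A
sketch of a release path under that rule (hypothetical, not part of this patch):

    unregister_ftrace_function(ops);        /* unlink from ftrace_ops_list */
    synchronize_sched();                    /* drain preempt-disabled walkers */
    kfree(ops);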
 
 /*
@@ -4143,8 +4179,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
  * Archs are to support both the regs and ftrace_ops at the same time.
  * If they support ftrace_ops, it is assumed they support regs.
  * If callbacks want to use regs, they must either check for regs
- * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS.
- * Note, ARCH_SUPPORT_SAVE_REGS expects a full regs to be saved.
+ * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
+ * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects the full pt_regs to be saved.
  * An architecture can pass partial regs with ftrace_ops and still
  * set ARCH_SUPPORTS_FTRACE_OPS.
  */
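
A callback that wants registers therefore follows the rule above by tolerating NULL
(or partial) regs; a sketch, with the callback name hypothetical:

    static void my_callback(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
    {
            if (!regs)
                    return; /* arch saved nothing for this call site */

            /* Complete pt_regs only with CONFIG_DYNAMIC_FTRACE_WITH_REGS;
             * otherwise treat the contents as partial. */
            pr_debug("traced %pS, pc=%lx\n",
                     (void *)ip, instruction_pointer(regs));
    }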