kernel/trace/ftrace.c (sfrench/cifs-2.6.git, Linux 6.9-rc4)
index 576c41644e77cec3f8c18f8b04580320deec3f49..da1710499698b0ed56ea89183d18befda56c0243 100644
@@ -18,6 +18,7 @@
 #include <linux/clocksource.h>
 #include <linux/sched/task.h>
 #include <linux/kallsyms.h>
+#include <linux/security.h>
 #include <linux/seq_file.h>
 #include <linux/tracefs.h>
 #include <linux/hardirq.h>
 #include "trace_output.h"
 #include "trace_stat.h"
 
+/* Flags that do not get reset */
+#define FTRACE_NOCLEAR_FLAGS   (FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
+                                FTRACE_FL_MODIFIED)
+
+#define FTRACE_INVALID_FUNCTION                "__ftrace_invalid_address__"
+
 #define FTRACE_WARN_ON(cond)                   \
        ({                                      \
                int ___r = cond;                \
@@ -61,8 +68,6 @@
        })
 
 /* hash bits for specific function selection */
-#define FTRACE_HASH_BITS 7
-#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
@@ -81,13 +86,13 @@ enum {
 
 struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
-       .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+       .flags          = FTRACE_OPS_FL_STUB,
        INIT_OPS_HASH(ftrace_list_end)
 };
 
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
-static int last_ftrace_enabled;
+static int __maybe_unused last_ftrace_enabled;
 
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
@@ -103,7 +108,7 @@ static bool ftrace_pids_enabled(struct ftrace_ops *ops)
 
        tr = ops->private;
 
-       return tr->function_pids != NULL;
+       return tr->function_pids != NULL || tr->function_no_pids != NULL;
 }
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops);
@@ -120,13 +125,35 @@ struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 struct ftrace_ops global_ops;
 
-#if ARCH_SUPPORTS_FTRACE_OPS
-static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
-                                struct ftrace_ops *op, struct pt_regs *regs);
-#else
-/* See comment below, where ftrace_ops_list_func is defined */
-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
-#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+/* Defined by vmlinux.lds.h see the comment above arch_ftrace_ops_list_func for details */
+void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+                         struct ftrace_ops *op, struct ftrace_regs *fregs);
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+/*
+ * Stub used to invoke the list ops without requiring a separate trampoline.
+ */
+const struct ftrace_ops ftrace_list_ops = {
+       .func   = ftrace_ops_list_func,
+       .flags  = FTRACE_OPS_FL_STUB,
+};
+
+static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
+                               struct ftrace_ops *op,
+                               struct ftrace_regs *fregs)
+{
+       /* do nothing */
+}
+
+/*
+ * Stub used when a call site is disabled. May be called transiently by threads
+ * which have made it into ftrace_caller but haven't yet recovered the ops at
+ * the point the call site is disabled.
+ */
+const struct ftrace_ops ftrace_nop_ops = {
+       .func   = ftrace_ops_nop_func,
+       .flags  = FTRACE_OPS_FL_STUB,
+};
 #endif
 
 static inline void ftrace_ops_init(struct ftrace_ops *ops)
@@ -141,25 +168,21 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
-                           struct ftrace_ops *op, struct pt_regs *regs)
+                           struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
        struct trace_array *tr = op->private;
+       int pid;
 
-       if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
-               return;
-
-       op->saved_func(ip, parent_ip, op, regs);
-}
+       if (tr) {
+               pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
+               if (pid == FTRACE_PID_IGNORE)
+                       return;
+               if (pid != FTRACE_PID_TRACE &&
+                   pid != current->pid)
+                       return;
+       }
 
-static void ftrace_sync(struct work_struct *work)
-{
-       /*
-        * This function is just a stub to implement a hard force
-        * of synchronize_rcu(). This requires synchronizing
-        * tasks even in userspace and idle.
-        *
-        * Yes, function tracing is rude.
-        */
+       op->saved_func(ip, parent_ip, op, fregs);
 }
 
 static void ftrace_sync_ipi(void *data)
@@ -171,7 +194,7 @@ static void ftrace_sync_ipi(void *data)
 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 {
        /*
-        * If this is a dynamic, RCU, or per CPU ops, or we force list func,
+        * If this is a dynamic or RCU ops, or we force list func,
         * then it needs to call the list anyway.
         */
        if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
@@ -235,7 +258,7 @@ static void update_ftrace_function(void)
        /*
         * For static tracing, we need to be a bit more careful.
         * The function change takes affect immediately. Thus,
-        * we need to coorditate the setting of the function_trace_ops
+        * we need to coordinate the setting of the function_trace_ops
         * with the setting of the ftrace_trace_function.
         *
         * Set the function to the list ops, which will call the
@@ -247,7 +270,7 @@ static void update_ftrace_function(void)
         * Make sure all CPUs see this. Yes this is slow, but static
         * tracing is slow and nasty to have enabled.
         */
-       schedule_on_each_cpu(ftrace_sync);
+       synchronize_rcu_tasks_rude();
        /* Now all cpus are using the list ops. */
        function_trace_op = set_function_trace_op;
        /* Make sure the function_trace_op is visible on all CPUs */
@@ -325,8 +348,10 @@ int __register_ftrace_function(struct ftrace_ops *ops)
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
 #endif
+       if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
+               return -EBUSY;
 
-       if (!core_kernel_data((unsigned long)ops))
+       if (!is_kernel_core_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
        add_ftrace_ops(&ftrace_ops_list, ops);
@@ -462,10 +487,10 @@ static void *function_stat_start(struct tracer_stat *trace)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /* function graph compares on total time */
-static int function_stat_cmp(void *p1, void *p2)
+static int function_stat_cmp(const void *p1, const void *p2)
 {
-       struct ftrace_profile *a = p1;
-       struct ftrace_profile *b = p2;
+       const struct ftrace_profile *a = p1;
+       const struct ftrace_profile *b = p2;
 
        if (a->time < b->time)
                return -1;
@@ -476,10 +501,10 @@ static int function_stat_cmp(void *p1, void *p2)
 }
 #else
 /* not function graph compares against hits */
-static int function_stat_cmp(void *p1, void *p2)
+static int function_stat_cmp(const void *p1, const void *p2)
 {
-       struct ftrace_profile *a = p1;
-       struct ftrace_profile *b = p2;
+       const struct ftrace_profile *a = p1;
+       const struct ftrace_profile *b = p2;
 
        if (a->counter < b->counter)
                return -1;
@@ -523,8 +548,7 @@ static int function_stat_show(struct seq_file *m, void *v)
        }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       avg = rec->time;
-       do_div(avg, rec->counter);
+       avg = div64_ul(rec->time, rec->counter);
        if (tracing_thresh && (avg < tracing_thresh))
                goto out;
 #endif
@@ -550,7 +574,8 @@ static int function_stat_show(struct seq_file *m, void *v)
                 * Divide only 1000 for ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide 1000 again.
                 */
-               do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
+               stddev = div64_ul(stddev,
+                                 rec->counter * (rec->counter - 1) * 1000);
        }
 
        trace_seq_init(&s);
@@ -584,7 +609,7 @@ static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
 }
 
-int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 {
        struct ftrace_profile_page *pg;
        int functions;
@@ -757,7 +782,7 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 
 static void
 function_profile_call(unsigned long ip, unsigned long parent_ip,
-                     struct ftrace_ops *ops, struct pt_regs *regs)
+                     struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
@@ -869,7 +894,7 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
-       .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       .flags          = FTRACE_OPS_FL_INITIALIZED,
        INIT_OPS_HASH(ftrace_profile_ops)
 };
 
@@ -960,7 +985,6 @@ static struct tracer_stat function_stats __initdata = {
 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 {
        struct ftrace_profile_stat *stat;
-       struct dentry *entry;
        char *name;
        int ret;
        int cpu;
@@ -991,10 +1015,9 @@ static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
                }
        }
 
-       entry = tracefs_create_file("function_profile_enabled", 0644,
-                                   d_tracer, NULL, &ftrace_profile_fops);
-       if (!entry)
-               pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
+       trace_create_file("function_profile_enabled",
+                         TRACE_MODE_WRITE, d_tracer, NULL,
+                         &ftrace_profile_fops);
 }
 
 #else /* CONFIG_FUNCTION_PROFILER */
@@ -1017,11 +1040,6 @@ static bool update_all_ops;
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
 
-struct ftrace_func_entry {
-       struct hlist_node hlist;
-       unsigned long ip;
-};
-
 struct ftrace_func_probe {
        struct ftrace_probe_ops *probe_ops;
        struct ftrace_ops       ops;
@@ -1048,13 +1066,12 @@ struct ftrace_ops global_ops = {
        .local_hash.notrace_hash        = EMPTY_HASH,
        .local_hash.filter_hash         = EMPTY_HASH,
        INIT_OPS_HASH(global_ops)
-       .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
-                                         FTRACE_OPS_FL_INITIALIZED |
+       .flags                          = FTRACE_OPS_FL_INITIALIZED |
                                          FTRACE_OPS_FL_PID,
 };
 
 /*
- * Used by the stack undwinder to know about dynamic ftrace trampolines.
+ * Used by the stack unwinder to know about dynamic ftrace trampolines.
  */
 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 {
@@ -1099,15 +1116,12 @@ struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
-       int                     size;
+       int                     order;
 };
 
 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
 
-/* estimate from running different kernels */
-#define NR_TO_INIT             10000
-
 static struct ftrace_page      *ftrace_pages_start;
 static struct ftrace_page      *ftrace_pages;
 
@@ -1146,7 +1160,7 @@ __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
  * Search a given @hash to see if a given instruction pointer (@ip)
  * exists in it.
  *
- * Returns the entry that holds the @ip if found. NULL otherwise.
+ * Returns: the entry that holds the @ip if found. NULL otherwise.
  */
 struct ftrace_func_entry *
 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
@@ -1169,18 +1183,19 @@ static void __add_hash_entry(struct ftrace_hash *hash,
        hash->count++;
 }
 
-static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
+static struct ftrace_func_entry *
+add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
 {
        struct ftrace_func_entry *entry;
 
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
-               return -ENOMEM;
+               return NULL;
 
        entry->ip = ip;
        __add_hash_entry(hash, entry);
 
-       return 0;
+       return entry;
 }
 
 static void
@@ -1265,12 +1280,17 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
        call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
 }
 
+/**
+ * ftrace_free_filter - remove all filters for an ftrace_ops
+ * @ops: the ops to remove the filters from
+ */
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
        ftrace_ops_init(ops);
        free_ftrace_hash(ops->func_hash->filter_hash);
        free_ftrace_hash(ops->func_hash->notrace_hash);
 }
+EXPORT_SYMBOL_GPL(ftrace_free_filter);
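/*
 * Editorial aside, not part of the patch: with ftrace_free_filter() now
 * documented and exported, a modular user of a dynamically allocated
 * ftrace_ops can release its filter/notrace hashes on teardown. A minimal
 * sketch of that pattern follows; my_callback/my_ops and the traced symbol
 * are illustrative only, and the callback uses the ftrace_regs based
 * prototype this diff converts callers to.
 */
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* runs for every function matched by the filter set below */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static int __init my_mod_init(void)
{
	int ret;

	/* trace only kfree(); last two arguments are len and reset */
	ret = ftrace_set_filter(&my_ops, "kfree", strlen("kfree"), 0);
	if (ret)
		return ret;
	return register_ftrace_function(&my_ops);
}

static void __exit my_mod_exit(void)
{
	unregister_ftrace_function(&my_ops);
	ftrace_free_filter(&my_ops);	/* frees filter_hash and notrace_hash */
}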
 
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
 {
@@ -1306,6 +1326,7 @@ static int ftrace_add_mod(struct trace_array *tr,
        if (!ftrace_mod)
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&ftrace_mod->list);
        ftrace_mod->func = kstrdup(func, GFP_KERNEL);
        ftrace_mod->module = kstrdup(module, GFP_KERNEL);
        ftrace_mod->enable = enable;
@@ -1329,7 +1350,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        int size;
-       int ret;
        int i;
 
        new_hash = alloc_ftrace_hash(size_bits);
@@ -1346,8 +1366,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
-                       ret = add_hash_entry(new_hash, entry->ip);
-                       if (ret < 0)
+                       if (add_hash_entry(new_hash, entry->ip) == NULL)
                                goto free_hash;
                }
        }
@@ -1369,28 +1388,20 @@ ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
                                       struct ftrace_hash *new_hash);
 
-static struct ftrace_hash *
-__ftrace_hash_move(struct ftrace_hash *src)
+static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
 {
        struct ftrace_func_entry *entry;
-       struct hlist_node *tn;
-       struct hlist_head *hhd;
        struct ftrace_hash *new_hash;
-       int size = src->count;
+       struct hlist_head *hhd;
+       struct hlist_node *tn;
        int bits = 0;
        int i;
 
        /*
-        * If the new source is empty, just return the empty_hash.
-        */
-       if (ftrace_hash_empty(src))
-               return EMPTY_HASH;
-
-       /*
-        * Make the hash size about 1/2 the # found
+        * Use around half the size (max bit of it), but
+        * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
         */
-       for (size /= 2; size; size >>= 1)
-               bits++;
+       bits = fls(size / 2);
 
        /* Don't allocate too much */
        if (bits > FTRACE_HASH_MAX_BITS)
@@ -1410,10 +1421,23 @@ __ftrace_hash_move(struct ftrace_hash *src)
                        __add_hash_entry(new_hash, entry);
                }
        }
-
        return new_hash;
 }
 
+static struct ftrace_hash *
+__ftrace_hash_move(struct ftrace_hash *src)
+{
+       int size = src->count;
+
+       /*
+        * If the new source is empty, just return the empty_hash.
+        */
+       if (ftrace_hash_empty(src))
+               return EMPTY_HASH;
+
+       return dup_hash(src, size);
+}
+
 static int
 ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
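/*
 * Editorial aside, not part of the patch: a worked example of the dup_hash()
 * sizing above. fls() returns the 1-based index of the highest set bit, so
 * the table gets roughly count/2 buckets, rounded down to a power of two and
 * capped by FTRACE_HASH_MAX_BITS (12, i.e. 4096 buckets).
 */
static int example_dup_hash_bits(int count)
{
	int bits = fls(count / 2);	/* count=100   -> fls(50)=6    -> 64 buckets   */
					/* count=10000 -> fls(5000)=13 -> capped below */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;
	return bits;			/* what dup_hash() hands to alloc_ftrace_hash() */
}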
@@ -1457,7 +1481,7 @@ static bool hash_contains_ip(unsigned long ip,
 {
        /*
         * The function record is a match if it exists in the filter
-        * hash and not in the notrace hash. Note, an emty hash is
+        * hash and not in the notrace hash. Note, an empty hash is
         * considered a match for the filter hash, but an empty
         * notrace hash is considered not in the notrace hash.
         */
@@ -1533,53 +1557,82 @@ static int ftrace_cmp_recs(const void *a, const void *b)
        return 0;
 }
 
-/**
- * ftrace_location_range - return the first address of a traced location
- *     if it touches the given ip range
- * @start: start of range to search.
- * @end: end of range to search (inclusive). @end points to the last byte
- *     to check.
- *
- * Returns rec->ip if the related ftrace location is a least partly within
- * the given address range. That is, the first address of the instruction
- * that is either a NOP or call to the function tracer. It checks the ftrace
- * internal tables to determine if the address belongs or not.
- */
-unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
 {
        struct ftrace_page *pg;
-       struct dyn_ftrace *rec;
+       struct dyn_ftrace *rec = NULL;
        struct dyn_ftrace key;
 
        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */
 
        for (pg = ftrace_pages_start; pg; pg = pg->next) {
-               if (end < pg->records[0].ip ||
+               if (pg->index == 0 ||
+                   end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
                        continue;
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
                if (rec)
-                       return rec->ip;
+                       break;
        }
+       return rec;
+}
+
+/**
+ * ftrace_location_range - return the first address of a traced location
+ *     if it touches the given ip range
+ * @start: start of range to search.
+ * @end: end of range to search (inclusive). @end points to the last byte
+ *     to check.
+ *
+ * Returns: rec->ip if the related ftrace location is a least partly within
+ * the given address range. That is, the first address of the instruction
+ * that is either a NOP or call to the function tracer. It checks the ftrace
+ * internal tables to determine if the address belongs or not.
+ */
+unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+{
+       struct dyn_ftrace *rec;
+
+       rec = lookup_rec(start, end);
+       if (rec)
+               return rec->ip;
 
        return 0;
 }
 
 /**
- * ftrace_location - return true if the ip giving is a traced location
+ * ftrace_location - return the ftrace location
  * @ip: the instruction pointer to check
  *
- * Returns rec->ip if @ip given is a pointer to a ftrace location.
- * That is, the instruction that is either a NOP or call to
- * the function tracer. It checks the ftrace internal tables to
- * determine if the address belongs or not.
+ * Returns:
+ * * If @ip matches the ftrace location, return @ip.
+ * * If @ip matches sym+0, return sym's ftrace location.
+ * * Otherwise, return 0.
  */
 unsigned long ftrace_location(unsigned long ip)
 {
-       return ftrace_location_range(ip, ip);
+       struct dyn_ftrace *rec;
+       unsigned long offset;
+       unsigned long size;
+
+       rec = lookup_rec(ip, ip);
+       if (!rec) {
+               if (!kallsyms_lookup_size_offset(ip, &size, &offset))
+                       goto out;
+
+               /* map sym+0 to __fentry__ */
+               if (!offset)
+                       rec = lookup_rec(ip, ip + size - 1);
+       }
+
+       if (rec)
+               return rec->ip;
+
+out:
+       return 0;
 }
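/*
 * Editorial aside, not part of the patch: the sym+0 mapping above matters to
 * callers such as kprobes or live patching that pass a plain function
 * address. On architectures where the __fentry__ site is not at offset 0
 * (e.g. when an ENDBR instruction precedes it), ftrace_location(func) now
 * still resolves to the real patch site. A hypothetical caller:
 */
static bool example_has_ftrace_site(unsigned long addr)
{
	/* ftrace_location() returns 0 when no patch site covers this address/symbol */
	return ftrace_location(addr) != 0;
}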
 
 /**
@@ -1587,7 +1640,7 @@ unsigned long ftrace_location(unsigned long ip)
  * @start: start of range to search
  * @end: end of range to search (inclusive). @end points to the last byte to check.
  *
- * Returns 1 if @start and @end contains a ftrace location.
+ * Returns: 1 if @start and @end contains a ftrace location.
  * That is, the instruction that is either a NOP or call to
  * the function tracer. It checks the ftrace internal tables to
  * determine if the address belongs or not.
@@ -1622,6 +1675,25 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
        return  keep_regs;
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
+static struct ftrace_ops *
+ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+
+static bool skip_record(struct dyn_ftrace *rec)
+{
+       /*
+        * At boot up, weak functions are set to disable. Function tracing
+        * can be enabled before they are, and they still need to be disabled now.
+        * If the record is disabled, still continue if it is marked as already
+        * enabled (this is needed to keep the accounting working).
+        */
+       return rec->flags & FTRACE_FL_DISABLED &&
+               !(rec->flags & FTRACE_FL_ENABLED);
+}
+
 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
@@ -1671,7 +1743,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                int in_hash = 0;
                int match = 0;
 
-               if (rec->flags & FTRACE_FL_DISABLED)
+               if (skip_record(rec))
                        continue;
 
                if (all) {
@@ -1709,6 +1781,9 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
                                return false;
 
+                       if (ops->flags & FTRACE_OPS_FL_DIRECT)
+                               rec->flags |= FTRACE_FL_DIRECT;
+
                        /*
                         * If there's only a single callback registered to a
                         * function, and the ops has a trampoline registered
@@ -1736,6 +1811,15 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                return false;
                        rec->flags--;
 
+                       /*
+                        * Only the internal direct_ops should have the
+                        * DIRECT flag set. Thus, if it is removing a
+                        * function, then that function should no longer
+                        * be direct.
+                        */
+                       if (ops->flags & FTRACE_OPS_FL_DIRECT)
+                               rec->flags &= ~FTRACE_FL_DIRECT;
+
                        /*
                         * If the rec had REGS enabled and the ops that is
                         * being removed had REGS set, then see if there is
@@ -1750,25 +1834,39 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                        }
 
                        /*
-                        * If the rec had TRAMP enabled, then it needs to
-                        * be cleared. As TRAMP can only be enabled iff
-                        * there is only a single ops attached to it.
-                        * In otherwords, always disable it on decrementing.
-                        * In the future, we may set it if rec count is
-                        * decremented to one, and the ops that is left
-                        * has a trampoline.
+                        * The TRAMP needs to be set only if rec count
+                        * is decremented to one, and the ops that is
+                        * left has a trampoline. As TRAMP can only be
+                        * enabled if there is only a single ops attached
+                        * to it.
                         */
-                       rec->flags &= ~FTRACE_FL_TRAMP;
+                       if (ftrace_rec_count(rec) == 1 &&
+                           ftrace_find_tramp_ops_any_other(rec, ops))
+                               rec->flags |= FTRACE_FL_TRAMP;
+                       else
+                               rec->flags &= ~FTRACE_FL_TRAMP;
 
                        /*
                         * flags will be cleared in ftrace_check_record()
                         * if rec count is zero.
                         */
                }
+
+               /*
+                * If the rec has a single associated ops, and ops->func can be
+                * called directly, allow the call site to call via the ops.
+                */
+               if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
+                   ftrace_rec_count(rec) == 1 &&
+                   ftrace_ops_get_func(ops) == ops->func)
+                       rec->flags |= FTRACE_FL_CALL_OPS;
+               else
+                       rec->flags &= ~FTRACE_FL_CALL_OPS;
+
                count++;
 
                /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
-               update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;
+               update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
 
                /* Shortcut, if we handled all records, we are done. */
                if (!all && count == hash->count)
@@ -1833,6 +1931,13 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
  *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
  *  - If the hash is EMPTY_HASH, it hits nothing
  *  - Anything else hits the recs which match the hash entries.
+ *
+ * DIRECT ops does not have IPMODIFY flag, but we still need to check it
+ * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
+ * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
+ * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
+ * the return value to the caller and eventually to the owner of the DIRECT
+ * ops.
  */
 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
                                         struct ftrace_hash *old_hash,
@@ -1841,17 +1946,26 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
        struct ftrace_page *pg;
        struct dyn_ftrace *rec, *end = NULL;
        int in_old, in_new;
+       bool is_ipmodify, is_direct;
 
        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
                return 0;
 
-       if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+       is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
+       is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;
+
+       /* neither IPMODIFY nor DIRECT, skip */
+       if (!is_ipmodify && !is_direct)
+               return 0;
+
+       if (WARN_ON_ONCE(is_ipmodify && is_direct))
                return 0;
 
        /*
-        * Since the IPMODIFY is a very address sensitive action, we do not
-        * allow ftrace_ops to set all functions to new hash.
+        * Since the IPMODIFY and DIRECT are very address sensitive
+        * actions, we do not allow ftrace_ops to set all functions to new
+        * hash.
         */
        if (!new_hash || !old_hash)
                return -EINVAL;
@@ -1869,12 +1983,32 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
                        continue;
 
                if (in_new) {
-                       /* New entries must ensure no others are using it */
-                       if (rec->flags & FTRACE_FL_IPMODIFY)
-                               goto rollback;
-                       rec->flags |= FTRACE_FL_IPMODIFY;
-               } else /* Removed entry */
+                       if (rec->flags & FTRACE_FL_IPMODIFY) {
+                               int ret;
+
+                               /* Cannot have two ipmodify on same rec */
+                               if (is_ipmodify)
+                                       goto rollback;
+
+                               FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
+
+                               /*
+                                * Another ops with IPMODIFY is already
+                                * attached. We are now attaching a direct
+                                * ops. Run SHARE_IPMODIFY_SELF, to check
+                                * whether sharing is supported.
+                                */
+                               if (!ops->ops_func)
+                                       return -EBUSY;
+                               ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
+                               if (ret)
+                                       return ret;
+                       } else if (is_ipmodify) {
+                               rec->flags |= FTRACE_FL_IPMODIFY;
+                       }
+               } else if (is_ipmodify) {
                        rec->flags &= ~FTRACE_FL_IPMODIFY;
+               }
        } while_for_each_ftrace_rec();
 
        return 0;
@@ -1943,19 +2077,17 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
 
 static void print_ip_ins(const char *fmt, const unsigned char *p)
 {
-       int i;
+       char ins[MCOUNT_INSN_SIZE];
 
-       printk(KERN_CONT "%s", fmt);
+       if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
+               printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
+               return;
+       }
 
-       for (i = 0; i < MCOUNT_INSN_SIZE; i++)
-               printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+       printk(KERN_CONT "%s", fmt);
+       pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
 }
 
-static struct ftrace_ops *
-ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
-static struct ftrace_ops *
-ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
-
 enum ftrace_bug_type ftrace_bug_type;
 const void *ftrace_expected;
 
@@ -1995,16 +2127,16 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
 {
        unsigned long ip = rec ? rec->ip : 0;
 
+       pr_info("------------[ ftrace bug ]------------\n");
+
        switch (failed) {
        case -EFAULT:
-               FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on modifying ");
-               print_ip_sym(ip);
+               print_ip_sym(KERN_INFO, ip);
                break;
        case -EINVAL:
-               FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace failed to modify ");
-               print_ip_sym(ip);
+               print_ip_sym(KERN_INFO, ip);
                print_ip_ins(" actual:   ", (unsigned char *)ip);
                pr_cont("\n");
                if (ftrace_expected) {
@@ -2013,22 +2145,21 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
                }
                break;
        case -EPERM:
-               FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on writing ");
-               print_ip_sym(ip);
+               print_ip_sym(KERN_INFO, ip);
                break;
        default:
-               FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on unknown error ");
-               print_ip_sym(ip);
+               print_ip_sym(KERN_INFO, ip);
        }
        print_bug_type();
        if (rec) {
                struct ftrace_ops *ops = NULL;
 
                pr_info("ftrace record flags: %lx\n", rec->flags);
-               pr_cont(" (%ld)%s", ftrace_rec_count(rec),
-                       rec->flags & FTRACE_FL_REGS ? " R" : "  ");
+               pr_cont(" (%ld)%s%s", ftrace_rec_count(rec),
+                       rec->flags & FTRACE_FL_REGS ? " R" : "  ",
+                       rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ");
                if (rec->flags & FTRACE_FL_TRAMP_EN) {
                        ops = ftrace_find_tramp_ops_any(rec);
                        if (ops) {
@@ -2045,15 +2176,17 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
                ip = ftrace_get_addr_curr(rec);
                pr_cont("\n expected tramp: %lx\n", ip);
        }
+
+       FTRACE_WARN_ON_ONCE(1);
 }
 
-static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
+static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
 {
        unsigned long flag = 0UL;
 
        ftrace_bug_type = FTRACE_BUG_UNKNOWN;
 
-       if (rec->flags & FTRACE_FL_DISABLED)
+       if (skip_record(rec))
                return FTRACE_UPDATE_IGNORE;
 
        /*
@@ -2074,15 +2207,48 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
         * If enabling and the REGS flag does not match the REGS_EN, or
         * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
         * this record. Set flags to fail the compare against ENABLED.
+        * Same for direct calls.
         */
        if (flag) {
-               if (!(rec->flags & FTRACE_FL_REGS) != 
+               if (!(rec->flags & FTRACE_FL_REGS) !=
                    !(rec->flags & FTRACE_FL_REGS_EN))
                        flag |= FTRACE_FL_REGS;
 
-               if (!(rec->flags & FTRACE_FL_TRAMP) != 
+               if (!(rec->flags & FTRACE_FL_TRAMP) !=
                    !(rec->flags & FTRACE_FL_TRAMP_EN))
                        flag |= FTRACE_FL_TRAMP;
+
+               /*
+                * Direct calls are special, as count matters.
+                * We must test the record for direct, if the
+                * DIRECT and DIRECT_EN do not match, but only
+                * if the count is 1. That's because, if the
+                * count is something other than one, we do not
+                * want the direct enabled (it will be done via the
+                * direct helper). But if DIRECT_EN is set, and
+                * the count is not one, we need to clear it.
+                *
+                */
+               if (ftrace_rec_count(rec) == 1) {
+                       if (!(rec->flags & FTRACE_FL_DIRECT) !=
+                           !(rec->flags & FTRACE_FL_DIRECT_EN))
+                               flag |= FTRACE_FL_DIRECT;
+               } else if (rec->flags & FTRACE_FL_DIRECT_EN) {
+                       flag |= FTRACE_FL_DIRECT;
+               }
+
+               /*
+                * Ops calls are special, as count matters.
+                * As with direct calls, they must only be enabled when count
+                * is one, otherwise they'll be handled via the list ops.
+                */
+               if (ftrace_rec_count(rec) == 1) {
+                       if (!(rec->flags & FTRACE_FL_CALL_OPS) !=
+                           !(rec->flags & FTRACE_FL_CALL_OPS_EN))
+                               flag |= FTRACE_FL_CALL_OPS;
+               } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
+                       flag |= FTRACE_FL_CALL_OPS;
+               }
        }
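/*
 * Editorial aside, not part of the patch: the "!(rec->flags & X) !=
 * !(rec->flags & X_EN)" comparisons above are boolean XORs: they are true
 * when the wanted state and the currently enabled (_EN) state disagree,
 * which is what forces the call site to be re-patched. Equivalent helper,
 * names mine:
 */
static inline bool example_state_mismatch(unsigned long flags,
					  unsigned long want_bit,
					  unsigned long enabled_bit)
{
	return !(flags & want_bit) != !(flags & enabled_bit);
}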
 
        /* If the state of this record hasn't changed, then do nothing */
@@ -2094,7 +2260,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
                flag ^= rec->flags & FTRACE_FL_ENABLED;
 
                if (update) {
-                       rec->flags |= FTRACE_FL_ENABLED;
+                       rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED;
                        if (flag & FTRACE_FL_REGS) {
                                if (rec->flags & FTRACE_FL_REGS)
                                        rec->flags |= FTRACE_FL_REGS_EN;
@@ -2107,6 +2273,45 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
                                else
                                        rec->flags &= ~FTRACE_FL_TRAMP_EN;
                        }
+
+                       /* Keep track of anything that modifies the function */
+                       if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY))
+                               rec->flags |= FTRACE_FL_MODIFIED;
+
+                       if (flag & FTRACE_FL_DIRECT) {
+                               /*
+                                * If there's only one user (direct_ops helper)
+                                * then we can call the direct function
+                                * directly (no ftrace trampoline).
+                                */
+                               if (ftrace_rec_count(rec) == 1) {
+                                       if (rec->flags & FTRACE_FL_DIRECT)
+                                               rec->flags |= FTRACE_FL_DIRECT_EN;
+                                       else
+                                               rec->flags &= ~FTRACE_FL_DIRECT_EN;
+                               } else {
+                                       /*
+                                        * Can only call directly if there's
+                                        * only one callback to the function.
+                                        */
+                                       rec->flags &= ~FTRACE_FL_DIRECT_EN;
+                               }
+                       }
+
+                       if (flag & FTRACE_FL_CALL_OPS) {
+                               if (ftrace_rec_count(rec) == 1) {
+                                       if (rec->flags & FTRACE_FL_CALL_OPS)
+                                               rec->flags |= FTRACE_FL_CALL_OPS_EN;
+                                       else
+                                               rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
+                               } else {
+                                       /*
+                                        * Can only call directly if there's
+                                        * only one set of associated ops.
+                                        */
+                                       rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
+                               }
+                       }
                }
 
                /*
@@ -2129,14 +2334,15 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
        if (update) {
                /* If there's no more users, clear all flags */
                if (!ftrace_rec_count(rec))
-                       rec->flags = 0;
+                       rec->flags &= FTRACE_NOCLEAR_FLAGS;
                else
                        /*
                         * Just disable the record, but keep the ops TRAMP
                         * and REGS states. The _EN flags must be disabled though.
                         */
                        rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
-                                       FTRACE_FL_REGS_EN);
+                                       FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN |
+                                       FTRACE_FL_CALL_OPS_EN);
        }
 
        ftrace_bug_type = FTRACE_BUG_NOP;
@@ -2144,30 +2350,30 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
 }
 
 /**
- * ftrace_update_record, set a record that now is tracing or not
+ * ftrace_update_record - set a record that now is tracing or not
  * @rec: the record to update
- * @enable: set to 1 if the record is tracing, zero to force disable
+ * @enable: set to true if the record is tracing, false to force disable
  *
  * The records that represent all functions that can be traced need
  * to be updated when tracing has been enabled.
  */
-int ftrace_update_record(struct dyn_ftrace *rec, int enable)
+int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
 {
-       return ftrace_check_record(rec, enable, 1);
+       return ftrace_check_record(rec, enable, true);
 }
 
 /**
- * ftrace_test_record, check if the record has been enabled or not
+ * ftrace_test_record - check if the record has been enabled or not
  * @rec: the record to test
- * @enable: set to 1 to check if enabled, 0 if it is disabled
+ * @enable: set to true to check if enabled, false if it is disabled
  *
  * The arch code may need to test if a record is already set to
  * tracing to determine how to modify the function code that it
  * represents.
  */
-int ftrace_test_record(struct dyn_ftrace *rec, int enable)
+int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
 {
-       return ftrace_check_record(rec, enable, 0);
+       return ftrace_check_record(rec, enable, false);
 }
 
 static struct ftrace_ops *
@@ -2188,6 +2394,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
        return NULL;
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
+{
+       struct ftrace_ops *op;
+       unsigned long ip = rec->ip;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+               if (op == op_exclude || !op->trampoline)
+                       continue;
+
+               if (hash_contains_ip(ip, op->func_hash))
+                       return op;
+       } while_for_each_ftrace_op(op);
+
+       return NULL;
+}
+
 static struct ftrace_ops *
 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
                           struct ftrace_ops *op)
@@ -2201,7 +2425,7 @@ ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
 
                if (hash_contains_ip(ip, op->func_hash))
                        return op;
-       } 
+       }
 
        return NULL;
 }
@@ -2291,19 +2515,80 @@ ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
        return NULL;
 }
 
+struct ftrace_ops *
+ftrace_find_unique_ops(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *op, *found = NULL;
+       unsigned long ip = rec->ip;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+               if (hash_contains_ip(ip, op->func_hash)) {
+                       if (found)
+                               return NULL;
+                       found = op;
+               }
+
+       } while_for_each_ftrace_op(op);
+
+       return found;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+/* Protected by rcu_tasks for reading, and direct_mutex for writing */
+static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
+static DEFINE_MUTEX(direct_mutex);
+int ftrace_direct_func_count;
+
+/*
+ * Search the direct_functions hash to see if the given instruction pointer
+ * has a direct caller attached to it.
+ */
+unsigned long ftrace_find_rec_direct(unsigned long ip)
+{
+       struct ftrace_func_entry *entry;
+
+       entry = __ftrace_lookup_ip(direct_functions, ip);
+       if (!entry)
+               return 0;
+
+       return entry->direct;
+}
+
+static void call_direct_funcs(unsigned long ip, unsigned long pip,
+                             struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+       unsigned long addr = READ_ONCE(ops->direct_call);
+
+       if (!addr)
+               return;
+
+       arch_ftrace_set_direct_caller(fregs, addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
 /**
  * ftrace_get_addr_new - Get the call address to set to
  * @rec:  The ftrace record descriptor
  *
  * If the record has the FTRACE_FL_REGS set, that means that it
  * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
- * is not not set, then it wants to convert to the normal callback.
+ * is not set, then it wants to convert to the normal callback.
  *
- * Returns the address of the trampoline to set to
+ * Returns: the address of the trampoline to set to
  */
 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
 {
        struct ftrace_ops *ops;
+       unsigned long addr;
+
+       if ((rec->flags & FTRACE_FL_DIRECT) &&
+           (ftrace_rec_count(rec) == 1)) {
+               addr = ftrace_find_rec_direct(rec->ip);
+               if (addr)
+                       return addr;
+               WARN_ON_ONCE(1);
+       }
 
        /* Trampolines take precedence over regs */
        if (rec->flags & FTRACE_FL_TRAMP) {
@@ -2331,11 +2616,20 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
  * a function that saves all the regs. Basically the '_EN' version
  * represents the current state of the function.
  *
- * Returns the address of the trampoline that is currently being called
+ * Returns: the address of the trampoline that is currently being called
  */
 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
 {
        struct ftrace_ops *ops;
+       unsigned long addr;
+
+       /* Direct calls take precedence over trampolines */
+       if (rec->flags & FTRACE_FL_DIRECT_EN) {
+               addr = ftrace_find_rec_direct(rec->ip);
+               if (addr)
+                       return addr;
+               WARN_ON_ONCE(1);
+       }
 
        /* Trampolines take precedence over regs */
        if (rec->flags & FTRACE_FL_TRAMP_EN) {
@@ -2356,7 +2650,7 @@ unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
 }
 
 static int
-__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
 {
        unsigned long ftrace_old_addr;
        unsigned long ftrace_addr;
@@ -2395,7 +2689,7 @@ void __weak ftrace_replace_code(int mod_flags)
 {
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
-       int enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
+       bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
        int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
        int failed;
 
@@ -2404,7 +2698,7 @@ void __weak ftrace_replace_code(int mod_flags)
 
        do_for_each_ftrace_rec(pg, rec) {
 
-               if (rec->flags & FTRACE_FL_DISABLED)
+               if (skip_record(rec))
                        continue;
 
                failed = __ftrace_replace_code(rec, enable);
@@ -2424,9 +2718,9 @@ struct ftrace_rec_iter {
 };
 
 /**
- * ftrace_rec_iter_start, start up iterating over traced functions
+ * ftrace_rec_iter_start - start up iterating over traced functions
  *
- * Returns an iterator handle that is used to iterate over all
+ * Returns: an iterator handle that is used to iterate over all
  * the records that represent address locations where functions
  * are traced.
  *
@@ -2455,10 +2749,10 @@ struct ftrace_rec_iter *ftrace_rec_iter_start(void)
 }
 
 /**
- * ftrace_rec_iter_next, get the next record to process.
+ * ftrace_rec_iter_next - get the next record to process.
  * @iter: The handle to the iterator.
  *
- * Returns the next iterator after the given iterator @iter.
+ * Returns: the next iterator after the given iterator @iter.
  */
 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
 {
@@ -2480,10 +2774,10 @@ struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
 }
 
 /**
- * ftrace_rec_iter_record, get the record at the iterator location
+ * ftrace_rec_iter_record - get the record at the iterator location
  * @iter: The current iterator location
  *
- * Returns the record that the current @iter is at.
+ * Returns: the record that the current @iter is at.
  */
 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
 {
@@ -2491,14 +2785,14 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
 }
 
 static int
-ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
 {
        int ret;
 
        if (unlikely(ftrace_disabled))
                return 0;
 
-       ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+       ret = ftrace_init_nop(mod, rec);
        if (ret) {
                ftrace_bug_type = FTRACE_BUG_INIT;
                ftrace_bug(ret, rec);
@@ -2511,18 +2805,29 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
  * archs can override this function if they must do something
  * before the modifying code is performed.
  */
-int __weak ftrace_arch_code_modify_prepare(void)
+void __weak ftrace_arch_code_modify_prepare(void)
 {
-       return 0;
 }
 
 /*
  * archs can override this function if they must do something
  * after the modifying code is performed.
  */
-int __weak ftrace_arch_code_modify_post_process(void)
+void __weak ftrace_arch_code_modify_post_process(void)
 {
-       return 0;
+}
+
+static int update_ftrace_func(ftrace_func_t func)
+{
+       static ftrace_func_t save_func;
+
+       /* Avoid updating if it hasn't changed */
+       if (func == save_func)
+               return 0;
+
+       save_func = func;
+
+       return ftrace_update_ftrace_func(func);
 }
 
 void ftrace_modify_all_code(int command)
@@ -2545,7 +2850,7 @@ void ftrace_modify_all_code(int command)
         * traced.
         */
        if (update) {
-               err = ftrace_update_ftrace_func(ftrace_ops_list_func);
+               err = update_ftrace_func(ftrace_ops_list_func);
                if (FTRACE_WARN_ON(err))
                        return;
        }
@@ -2561,7 +2866,7 @@ void ftrace_modify_all_code(int command)
                /* If irqs are disabled, we are in stop machine */
                if (!irqs_disabled())
                        smp_call_function(ftrace_sync_ipi, NULL, 1);
-               err = ftrace_update_ftrace_func(ftrace_trace_function);
+               err = update_ftrace_func(ftrace_trace_function);
                if (FTRACE_WARN_ON(err))
                        return;
        }
@@ -2583,7 +2888,7 @@ static int __ftrace_modify_code(void *data)
 }
 
 /**
- * ftrace_run_stop_machine, go back to the stop machine method
+ * ftrace_run_stop_machine - go back to the stop machine method
  * @command: The command to tell ftrace what to do
  *
  * If an arch needs to fall back to the stop machine method, the
@@ -2595,7 +2900,7 @@ void ftrace_run_stop_machine(int command)
 }
 
 /**
- * arch_ftrace_update_code, modify the code to trace or not trace
+ * arch_ftrace_update_code - modify the code to trace or not trace
  * @command: The command that needs to be done
  *
  * Archs can override this function if it does not need to
@@ -2608,12 +2913,7 @@ void __weak arch_ftrace_update_code(int command)
 
 static void ftrace_run_update_code(int command)
 {
-       int ret;
-
-       ret = ftrace_arch_code_modify_prepare();
-       FTRACE_WARN_ON(ret);
-       if (ret)
-               return;
+       ftrace_arch_code_modify_prepare();
 
        /*
         * By default we use stop_machine() to modify the code.
@@ -2623,8 +2923,7 @@ static void ftrace_run_update_code(int command)
         */
        arch_ftrace_update_code(command);
 
-       ret = ftrace_arch_code_modify_post_process();
-       FTRACE_WARN_ON(ret);
+       ftrace_arch_code_modify_post_process();
 }
 
 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
@@ -2646,15 +2945,60 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
 {
 }
 
-static void ftrace_startup_enable(int command)
+/* List of trace_ops that have allocated trampolines */
+static LIST_HEAD(ftrace_ops_trampoline_list);
+
+static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
 {
-       if (saved_ftrace_func != ftrace_trace_function) {
-               saved_ftrace_func = ftrace_trace_function;
-               command |= FTRACE_UPDATE_TRACE_FUNC;
-       }
+       lockdep_assert_held(&ftrace_lock);
+       list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
+}
 
-       if (!command || !ftrace_enabled)
-               return;
+static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
+{
+       lockdep_assert_held(&ftrace_lock);
+       list_del_rcu(&ops->list);
+       synchronize_rcu();
+}
+
+/*
+ * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
+ * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
+ * not a module.
+ */
+#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
+#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
+
+static void ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+       if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
+           ops->trampoline) {
+               /*
+                * Record the text poke event before the ksymbol unregister
+                * event.
+                */
+               perf_event_text_poke((void *)ops->trampoline,
+                                    (void *)ops->trampoline,
+                                    ops->trampoline_size, NULL, 0);
+               perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
+                                  ops->trampoline, ops->trampoline_size,
+                                  true, FTRACE_TRAMPOLINE_SYM);
+               /* Remove from kallsyms after the perf events */
+               ftrace_remove_trampoline_from_kallsyms(ops);
+       }
+
+       arch_ftrace_trampoline_free(ops);
+}
+
+static void ftrace_startup_enable(int command)
+{
+       if (saved_ftrace_func != ftrace_trace_function) {
+               saved_ftrace_func = ftrace_trace_function;
+               command |= FTRACE_UPDATE_TRACE_FUNC;
+       }
+
+       if (!command || !ftrace_enabled)
+               return;
 
        ftrace_run_update_code(command);
 }
@@ -2695,6 +3039,8 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
                __unregister_ftrace_function(ops);
                ftrace_start_up--;
                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+               if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+                       ftrace_trampoline_free(ops);
                return ret;
        }
 
@@ -2703,6 +3049,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
 
        ftrace_startup_enable(command);
 
+       /*
+        * If ftrace is in an undefined state, we just remove ops from list
+        * to prevent the NULL pointer, instead of totally rolling it back and
+        * free trampoline, because those actions could cause further damage.
+        */
+       if (unlikely(ftrace_disabled)) {
+               __unregister_ftrace_function(ops);
+               return -ENODEV;
+       }
+
        ops->flags &= ~FTRACE_OPS_FL_ADDING;
 
        return 0;
@@ -2740,18 +3096,8 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }
 
-       if (!command || !ftrace_enabled) {
-               /*
-                * If these are dynamic or per_cpu ops, they still
-                * need their data freed. Since, function tracing is
-                * not currently active, we can just free them
-                * without synchronizing all CPUs.
-                */
-               if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-                       goto free_ops;
-
-               return 0;
-       }
+       if (!command || !ftrace_enabled)
+               goto out;
 
        /*
         * If the ops uses a trampoline, then it needs to be
@@ -2776,7 +3122,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
                struct dyn_ftrace *rec;
 
                do_for_each_ftrace_rec(pg, rec) {
-                       if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
+                       if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS))
                                pr_warn("  %pS flags:%lx\n",
                                        (void *)rec->ip, rec->flags);
                } while_for_each_ftrace_rec();
@@ -2788,11 +3134,10 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
        removed_ops = NULL;
        ops->flags &= ~FTRACE_OPS_FL_REMOVING;
 
+out:
        /*
         * Dynamic ops may be freed, we must make sure that all
         * callers are done before leaving this function.
-        * The same goes for freeing the per_cpu data of the per_cpu
-        * ops.
         */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
                /*
@@ -2803,61 +3148,28 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
                 * infrastructure to do the synchronization, thus we must do it
                 * ourselves.
                 */
-               schedule_on_each_cpu(ftrace_sync);
+               synchronize_rcu_tasks_rude();
 
                /*
-                * When the kernel is preeptive, tasks can be preempted
+                * When the kernel is preemptive, tasks can be preempted
                 * while on a ftrace trampoline. Just scheduling a task on
                 * a CPU is not good enough to flush them. Calling
-                * synchornize_rcu_tasks() will wait for those tasks to
+                * synchronize_rcu_tasks() will wait for those tasks to
                 * execute and either schedule voluntarily or enter user space.
                 */
-               if (IS_ENABLED(CONFIG_PREEMPT))
+               if (IS_ENABLED(CONFIG_PREEMPTION))
                        synchronize_rcu_tasks();
 
- free_ops:
-               arch_ftrace_trampoline_free(ops);
+               ftrace_trampoline_free(ops);
        }
 
        return 0;
 }
 
-static void ftrace_startup_sysctl(void)
-{
-       int command;
-
-       if (unlikely(ftrace_disabled))
-               return;
-
-       /* Force update next time */
-       saved_ftrace_func = NULL;
-       /* ftrace_start_up is true if we want ftrace running */
-       if (ftrace_start_up) {
-               command = FTRACE_UPDATE_CALLS;
-               if (ftrace_graph_active)
-                       command |= FTRACE_START_FUNC_RET;
-               ftrace_startup_enable(command);
-       }
-}
-
-static void ftrace_shutdown_sysctl(void)
-{
-       int command;
-
-       if (unlikely(ftrace_disabled))
-               return;
-
-       /* ftrace_start_up is true if ftrace is running */
-       if (ftrace_start_up) {
-               command = FTRACE_DISABLE_CALLS;
-               if (ftrace_graph_active)
-                       command |= FTRACE_STOP_FUNC_RET;
-               ftrace_run_update_code(command);
-       }
-}
-
 static u64             ftrace_update_time;
 unsigned long          ftrace_update_tot_cnt;
+unsigned long          ftrace_number_of_pages;
+unsigned long          ftrace_number_of_groups;
 
 static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
@@ -2869,38 +3181,9 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
                ftrace_hash_empty(ops->func_hash->notrace_hash);
 }
 
-/*
- * Check if the current ops references the record.
- *
- * If the ops traces all functions, then it was already accounted for.
- * If the ops does not trace the current record function, skip it.
- * If the ops ignores the function via notrace filter, skip it.
- */
-static inline bool
-ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
-{
-       /* If ops isn't enabled, ignore it */
-       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
-               return false;
-
-       /* If ops traces all then it includes this function */
-       if (ops_traces_mod(ops))
-               return true;
-
-       /* The function must be in the filter */
-       if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
-           !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
-               return false;
-
-       /* If in notrace hash, we ignore it too */
-       if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
-               return false;
-
-       return true;
-}
-
 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 {
+       bool init_nop = ftrace_need_init_nop();
        struct ftrace_page *pg;
        struct dyn_ftrace *p;
        u64 start, stop;
@@ -2939,8 +3222,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
                         * Do the initial record conversion from mcount jump
                         * to the NOP instructions.
                         */
-                       if (!__is_defined(CC_USING_NOP_MCOUNT) &&
-                           !ftrace_code_disable(mod, p))
+                       if (init_nop && !ftrace_nop_initialize(mod, p))
                                break;
 
                        update_cnt++;
@@ -2957,19 +3239,15 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
 {
        int order;
+       int pages;
        int cnt;
 
        if (WARN_ON(!count))
                return -EINVAL;
 
-       order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
-
-       /*
-        * We want to fill as much as possible. No more than a page
-        * may be empty.
-        */
-       while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
-               order--;
+       /* We want to fill as much as possible, with no empty pages */
+       pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
+       order = fls(pages) - 1;
 
  again:
        pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
@@ -2978,12 +3256,15 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
                /* if we can't allocate this size, try something smaller */
                if (!order)
                        return -ENOMEM;
-               order >>= 1;
+               order--;
                goto again;
        }
 
+       ftrace_number_of_pages += 1 << order;
+       ftrace_number_of_groups++;
+
        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
-       pg->size = cnt;
+       pg->order = order;
 
        if (cnt > count)
                cnt = count;
@@ -2991,12 +3272,27 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
        return cnt;
 }
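/*
 * Editorial worked example, not part of this patch: assuming 4K pages and
 * 32-byte dyn_ftrace records (so ENTRIES_PER_PAGE == 128), a request for
 * count == 1000 gives pages = DIV_ROUND_UP(1000, 128) = 8 and
 * order = fls(8) - 1 = 3, i.e. one 8-page (32K) allocation holding up to
 * 1024 records.  If that allocation fails, the "again" retry path in
 * ftrace_allocate_records() steps order down one power of two at a time
 * until order 0 is reached.
 */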
 
+static void ftrace_free_pages(struct ftrace_page *pages)
+{
+       struct ftrace_page *pg = pages;
+
+       while (pg) {
+               if (pg->records) {
+                       free_pages((unsigned long)pg->records, pg->order);
+                       ftrace_number_of_pages -= 1 << pg->order;
+               }
+               pages = pg->next;
+               kfree(pg);
+               pg = pages;
+               ftrace_number_of_groups--;
+       }
+}
+
 static struct ftrace_page *
 ftrace_allocate_pages(unsigned long num_to_init)
 {
        struct ftrace_page *start_pg;
        struct ftrace_page *pg;
-       int order;
        int cnt;
 
        if (!num_to_init)
@@ -3030,14 +3326,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
        return start_pg;
 
  free_pages:
-       pg = start_pg;
-       while (pg) {
-               order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
-               start_pg = pg->next;
-               kfree(pg);
-               pg = start_pg;
-       }
+       ftrace_free_pages(start_pg);
        pr_info("ftrace: FAILED to allocate memory for functions\n");
        return NULL;
 }
@@ -3093,6 +3382,14 @@ t_probe_next(struct seq_file *m, loff_t *pos)
                hnd = &iter->probe_entry->hlist;
 
        hash = iter->probe->ops.func_hash->filter_hash;
+
+       /*
+        * A probe being registered may temporarily have an empty hash
+        * and it's at the end of the func_probes list.
+        */
+       if (!hash || hash == EMPTY_HASH)
+               return NULL;
+
        size = 1 << hash->size_bits;
 
  retry:
@@ -3282,7 +3579,10 @@ t_func_next(struct seq_file *m, loff_t *pos)
                     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
 
                    ((iter->flags & FTRACE_ITER_ENABLED) &&
-                    !(rec->flags & FTRACE_FL_ENABLED))) {
+                    !(rec->flags & FTRACE_FL_ENABLED)) ||
+
+                   ((iter->flags & FTRACE_ITER_TOUCHED) &&
+                    !(rec->flags & FTRACE_FL_TOUCHED))) {
 
                        rec = NULL;
                        goto retry;
@@ -3411,6 +3711,105 @@ static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
                seq_printf(m, " ->%pS", ptr);
 }
 
+#ifdef FTRACE_MCOUNT_MAX_OFFSET
+/*
+ * Weak functions can still have an mcount/fentry that is saved in
+ * the __mcount_loc section. These can be detected by having a
+ * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
+ * symbol found by kallsyms is not the function that the mcount/fentry
+ * is part of. The offset is much greater in these cases.
+ *
+ * Test the record to make sure that the ip points to a valid kallsyms
+ * and if not, mark it disabled.
+ */
+static int test_for_valid_rec(struct dyn_ftrace *rec)
+{
+       char str[KSYM_SYMBOL_LEN];
+       unsigned long offset;
+       const char *ret;
+
+       ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
+
+       /* Weak functions can cause invalid addresses */
+       if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
+               rec->flags |= FTRACE_FL_DISABLED;
+               return 0;
+       }
+       return 1;
+}
+
+static struct workqueue_struct *ftrace_check_wq __initdata;
+static struct work_struct ftrace_check_work __initdata;
+
+/*
+ * Scan all the mcount/fentry entries to make sure they are valid.
+ */
+static __init void ftrace_check_work_func(struct work_struct *work)
+{
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+
+       mutex_lock(&ftrace_lock);
+       do_for_each_ftrace_rec(pg, rec) {
+               test_for_valid_rec(rec);
+       } while_for_each_ftrace_rec();
+       mutex_unlock(&ftrace_lock);
+}
+
+static int __init ftrace_check_for_weak_functions(void)
+{
+       INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
+
+       ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
+
+       queue_work(ftrace_check_wq, &ftrace_check_work);
+       return 0;
+}
+
+static int __init ftrace_check_sync(void)
+{
+       /* Make sure the ftrace_check updates are finished */
+       if (ftrace_check_wq)
+               destroy_workqueue(ftrace_check_wq);
+       return 0;
+}
+
+late_initcall_sync(ftrace_check_sync);
+subsys_initcall(ftrace_check_for_weak_functions);
+
+static int print_rec(struct seq_file *m, unsigned long ip)
+{
+       unsigned long offset;
+       char str[KSYM_SYMBOL_LEN];
+       char *modname;
+       const char *ret;
+
+       ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
+       /* Weak functions can cause invalid addresses */
+       if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
+               snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
+                        FTRACE_INVALID_FUNCTION, offset);
+               ret = NULL;
+       }
+
+       seq_puts(m, str);
+       if (modname)
+               seq_printf(m, " [%s]", modname);
+       return ret == NULL ? -1 : 0;
+}
+#else
+static inline int test_for_valid_rec(struct dyn_ftrace *rec)
+{
+       return 1;
+}
+
+static inline int print_rec(struct seq_file *m, unsigned long ip)
+{
+       seq_printf(m, "%ps", (void *)ip);
+       return 0;
+}
+#endif
+
 static int t_show(struct seq_file *m, void *v)
 {
        struct ftrace_iterator *iter = m->private;
@@ -3435,14 +3834,26 @@ static int t_show(struct seq_file *m, void *v)
        if (!rec)
                return 0;
 
-       seq_printf(m, "%ps", (void *)rec->ip);
-       if (iter->flags & FTRACE_ITER_ENABLED) {
+       if (iter->flags & FTRACE_ITER_ADDRS)
+               seq_printf(m, "%lx ", rec->ip);
+
+       if (print_rec(m, rec->ip)) {
+               /* This should only happen when a rec is disabled */
+               WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
+               seq_putc(m, '\n');
+               return 0;
+       }
+
+       if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) {
                struct ftrace_ops *ops;
 
-               seq_printf(m, " (%ld)%s%s",
+               seq_printf(m, " (%ld)%s%s%s%s%s",
                           ftrace_rec_count(rec),
                           rec->flags & FTRACE_FL_REGS ? " R" : "  ",
-                          rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
+                          rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
+                          rec->flags & FTRACE_FL_DIRECT ? " D" : "  ",
+                          rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ",
+                          rec->flags & FTRACE_FL_MODIFIED ? " M " : "   ");
                if (rec->flags & FTRACE_FL_TRAMP_EN) {
                        ops = ftrace_find_tramp_ops_any(rec);
                        if (ops) {
@@ -3458,7 +3869,23 @@ static int t_show(struct seq_file *m, void *v)
                } else {
                        add_trampoline_func(m, NULL, rec);
                }
-       }       
+               if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
+                       ops = ftrace_find_unique_ops(rec);
+                       if (ops) {
+                               seq_printf(m, "\tops: %pS (%pS)",
+                                          ops, ops->func);
+                       } else {
+                               seq_puts(m, "\tops: ERROR!");
+                       }
+               }
+               if (rec->flags & FTRACE_FL_DIRECT) {
+                       unsigned long direct;
+
+                       direct = ftrace_find_rec_direct(rec->ip);
+                       if (direct)
+                               seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
+               }
+       }
 
        seq_putc(m, '\n');
 
@@ -3476,6 +3903,11 @@ static int
 ftrace_avail_open(struct inode *inode, struct file *file)
 {
        struct ftrace_iterator *iter;
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
@@ -3495,6 +3927,15 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
 {
        struct ftrace_iterator *iter;
 
+       /*
+        * This shows us what functions are currently being
+        * traced and by what. Not sure if we want lockdown
+        * to hide such critical information for an admin.
+        * Although, perhaps it can show information we don't
+        * want people to see, but if something is tracing
+        * something, we probably want to know about it.
+        */
+
        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
        if (!iter)
                return -ENOMEM;
@@ -3506,6 +3947,55 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static int
+ftrace_touched_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_iterator *iter;
+
+       /*
+        * This shows us what functions have ever been enabled
+        * (traced, direct, patched, etc). Not sure if we want lockdown
+        * to hide such critical information for an admin.
+        * Although, perhaps it can show information we don't
+        * want people to see, but if something had traced
+        * something, we probably want to know about it.
+        */
+
+       iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
+       if (!iter)
+               return -ENOMEM;
+
+       iter->pg = ftrace_pages_start;
+       iter->flags = FTRACE_ITER_TOUCHED;
+       iter->ops = &global_ops;
+
+       return 0;
+}
+
+static int
+ftrace_avail_addrs_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_iterator *iter;
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
+       if (!iter)
+               return -ENOMEM;
+
+       iter->pg = ftrace_pages_start;
+       iter->flags = FTRACE_ITER_ADDRS;
+       iter->ops = &global_ops;
+
+       return 0;
+}
+
 /**
  * ftrace_regex_open - initialize function tracer filter files
  * @ops: The ftrace_ops that hold the hash filters
@@ -3521,6 +4011,8 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
  * tracing_lseek() should be used as the lseek routine, and
  * release must call ftrace_regex_release().
+ *
+ * Returns: 0 on success or a negative errno value on failure
  */
 int
 ftrace_regex_open(struct ftrace_ops *ops, int flag,
@@ -3530,21 +4022,22 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        struct ftrace_hash *hash;
        struct list_head *mod_head;
        struct trace_array *tr = ops->private;
-       int ret = 0;
+       int ret = -ENOMEM;
 
        ftrace_ops_init(ops);
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       if (tracing_check_open_get_tr(tr))
+               return -ENODEV;
+
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
-               return -ENOMEM;
+               goto out;
 
-       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
-               kfree(iter);
-               return -ENOMEM;
-       }
+       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
+               goto out;
 
        iter->ops = ops;
        iter->flags = flag;
@@ -3574,13 +4067,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
-                       kfree(iter);
-                       ret = -ENOMEM;
                        goto out_unlock;
                }
        } else
                iter->hash = hash;
 
+       ret = 0;
+
        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
 
@@ -3592,7 +4085,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                        /* Failed */
                        free_ftrace_hash(iter->hash);
                        trace_parser_put(&iter->parser);
-                       kfree(iter);
                }
        } else
                file->private_data = iter;
@@ -3600,14 +4092,22 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
  out_unlock:
        mutex_unlock(&ops->func_hash->regex_lock);
 
-       return ret;
-}
-
+ out:
+       if (ret) {
+               kfree(iter);
+               if (tr)
+                       trace_array_put(tr);
+       }
+
+       return ret;
+}
+
 static int
 ftrace_filter_open(struct inode *inode, struct file *file)
 {
        struct ftrace_ops *ops = inode->i_private;
 
+       /* Checks for tracefs lockdown */
        return ftrace_regex_open(ops,
                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
                        inode, file);
@@ -3618,6 +4118,7 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
 {
        struct ftrace_ops *ops = inode->i_private;
 
+       /* Checks for tracefs lockdown */
        return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
                                 inode, file);
 }
@@ -3691,8 +4192,8 @@ enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
                /* Do nothing if it exists */
                if (entry)
                        return 0;
-
-               ret = add_hash_entry(hash, rec->ip);
+               if (add_hash_entry(hash, rec->ip) == NULL)
+                       ret = -ENOMEM;
        }
        return ret;
 }
@@ -3722,6 +4223,24 @@ add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
        return 0;
 }
 
+#ifdef FTRACE_MCOUNT_MAX_OFFSET
+static int lookup_ip(unsigned long ip, char **modname, char *str)
+{
+       unsigned long offset;
+
+       kallsyms_lookup(ip, NULL, &offset, modname, str);
+       if (offset > FTRACE_MCOUNT_MAX_OFFSET)
+               return -1;
+       return 0;
+}
+#else
+static int lookup_ip(unsigned long ip, char **modname, char *str)
+{
+       kallsyms_lookup(ip, NULL, NULL, modname, str);
+       return 0;
+}
+#endif
+
 static int
 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
                struct ftrace_glob *mod_g, int exclude_mod)
@@ -3729,7 +4248,12 @@ ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
        char str[KSYM_SYMBOL_LEN];
        char *modname;
 
-       kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
+       if (lookup_ip(rec->ip, &modname, str)) {
+               /* This should only happen when a rec is disabled */
+               WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
+                            !(rec->flags & FTRACE_FL_DISABLED));
+               return 0;
+       }
 
        if (mod_g) {
                int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
@@ -3808,6 +4332,7 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
                        }
                        found = 1;
                }
+               cond_resched();
        } while_for_each_ftrace_rec();
  out_unlock:
        mutex_unlock(&ftrace_lock);
@@ -3945,7 +4470,6 @@ static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
        struct ftrace_hash **orig_hash, *new_hash;
        LIST_HEAD(process_mods);
        char *func;
-       int ret;
 
        mutex_lock(&ops->func_hash->regex_lock);
 
@@ -3974,8 +4498,7 @@ static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
                if (!func) /* warn? */
                        continue;
 
-               list_del(&ftrace_mod->list);
-               list_add(&ftrace_mod->list, &process_mods);
+               list_move(&ftrace_mod->list, &process_mods);
 
                /* Use the newly allocated func, as it may be "*" */
                kfree(ftrace_mod->func);
@@ -3998,7 +4521,7 @@ static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
 
        mutex_lock(&ftrace_lock);
 
-       ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
+       ftrace_hash_move_and_update_ops(ops, orig_hash,
                                              new_hash, enable);
        mutex_unlock(&ftrace_lock);
 
@@ -4076,7 +4599,7 @@ static int __init ftrace_mod_cmd_init(void)
 core_initcall(ftrace_mod_cmd_init);
 
 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
-                                     struct ftrace_ops *op, struct pt_regs *pt_regs)
+                                     struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
        struct ftrace_probe_ops *probe_ops;
        struct ftrace_func_probe *probe;
@@ -4106,7 +4629,7 @@ struct ftrace_func_mapper {
 /**
  * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
  *
- * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
+ * Returns: a ftrace_func_mapper descriptor that can be used to map ips to data.
  */
 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
 {
@@ -4126,7 +4649,7 @@ struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
  * @mapper: The mapper that has the ip maps
  * @ip: the instruction pointer to find the data for
  *
- * Returns the data mapped to @ip if found otherwise NULL. The return
+ * Returns: the data mapped to @ip if found otherwise NULL. The return
  * is actually the address of the mapper data pointer. The address is
  * returned for use cases where the data is no bigger than a long, and
  * the user can use the data pointer as its data instead of having to
@@ -4152,7 +4675,7 @@ void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
  * @ip: The instruction pointer address to map @data to
  * @data: The data to map to @ip
  *
- * Returns 0 on succes otherwise an error.
+ * Returns: 0 on success otherwise an error.
  */
 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
                              unsigned long ip, void *data)
@@ -4181,8 +4704,8 @@ int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
  * @mapper: The mapper that has the ip maps
  * @ip: The instruction pointer address to remove the data from
  *
- * Returns the data if it is found, otherwise NULL.
- * Note, if the data pointer is used as the data itself, (see 
+ * Returns: the data if it is found, otherwise NULL.
+ * Note, if the data pointer is used as the data itself, (see
  * ftrace_func_mapper_find_ip(), then the return value may be meaningless,
  * if the data pointer was set to zero.
  */
@@ -4277,8 +4800,8 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
                               struct ftrace_probe_ops *probe_ops,
                               void *data)
 {
+       struct ftrace_func_probe *probe = NULL, *iter;
        struct ftrace_func_entry *entry;
-       struct ftrace_func_probe *probe;
        struct ftrace_hash **orig_hash;
        struct ftrace_hash *old_hash;
        struct ftrace_hash *hash;
@@ -4297,11 +4820,13 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 
        mutex_lock(&ftrace_lock);
        /* Check if the probe_ops is already registered */
-       list_for_each_entry(probe, &tr->func_probes, list) {
-               if (probe->probe_ops == probe_ops)
+       list_for_each_entry(iter, &tr->func_probes, list) {
+               if (iter->probe_ops == probe_ops) {
+                       probe = iter;
                        break;
+               }
        }
-       if (&probe->list == &tr->func_probes) {
+       if (!probe) {
                probe = kzalloc(sizeof(*probe), GFP_KERNEL);
                if (!probe) {
                        mutex_unlock(&ftrace_lock);
@@ -4318,12 +4843,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
 
        mutex_unlock(&ftrace_lock);
 
+       /*
+        * Note, there's a small window here where the func_hash->filter_hash
+        * may be NULL or empty. Be careful when reading the loop.
+        */
        mutex_lock(&probe->ops.func_hash->regex_lock);
 
        orig_hash = &probe->ops.func_hash->filter_hash;
        old_hash = *orig_hash;
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
 
+       if (!hash) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
        ret = ftrace_match_records(hash, glob, strlen(glob));
 
        /* Nothing found? */
@@ -4410,9 +4944,9 @@ int
 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
                                      struct ftrace_probe_ops *probe_ops)
 {
+       struct ftrace_func_probe *probe = NULL, *iter;
        struct ftrace_ops_hash old_hash_ops;
        struct ftrace_func_entry *entry;
-       struct ftrace_func_probe *probe;
        struct ftrace_glob func_g;
        struct ftrace_hash **orig_hash;
        struct ftrace_hash *old_hash;
@@ -4440,11 +4974,13 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
 
        mutex_lock(&ftrace_lock);
        /* Check if the probe_ops is already registered */
-       list_for_each_entry(probe, &tr->func_probes, list) {
-               if (probe->probe_ops == probe_ops)
+       list_for_each_entry(iter, &tr->func_probes, list) {
+               if (iter->probe_ops == probe_ops) {
+                       probe = iter;
                        break;
+               }
        }
-       if (&probe->list == &tr->func_probes)
+       if (!probe)
                goto err_unlock_ftrace;
 
        ret = -EINVAL;
@@ -4683,11 +5219,12 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
 }
 
 static int
-ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
+__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 {
        struct ftrace_func_entry *entry;
 
-       if (!ftrace_location(ip))
+       ip = ftrace_location(ip);
+       if (!ip)
                return -EINVAL;
 
        if (remove) {
@@ -4698,12 +5235,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
                return 0;
        }
 
-       return add_hash_entry(hash, ip);
+       entry = add_hash_entry(hash, ip);
+       return entry ? 0 : -ENOMEM;
+}
+
+static int
+ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
+                 unsigned int cnt, int remove)
+{
+       unsigned int i;
+       int err;
+
+       for (i = 0; i < cnt; i++) {
+               err = __ftrace_match_addr(hash, ips[i], remove);
+               if (err) {
+                       /*
+                        * This expects that @hash is a temporary hash, and if this
+                        * fails the caller must free @hash.
+                        */
+                       return err;
+               }
+       }
+       return 0;
 }
 
 static int
 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
-               unsigned long ip, int remove, int reset, int enable)
+               unsigned long *ips, unsigned int cnt,
+               int remove, int reset, int enable)
 {
        struct ftrace_hash **orig_hash;
        struct ftrace_hash *hash;
@@ -4733,8 +5292,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
                ret = -EINVAL;
                goto out_regex_unlock;
        }
-       if (ip) {
-               ret = ftrace_match_addr(hash, ip, remove);
+       if (ips) {
+               ret = ftrace_match_addr(hash, ips, cnt, remove);
                if (ret < 0)
                        goto out_regex_unlock;
        }
@@ -4751,33 +5310,370 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
 }
 
 static int
-ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
-               int reset, int enable)
+ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
+               int remove, int reset, int enable)
+{
+       return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+
+struct ftrace_direct_func {
+       struct list_head        next;
+       unsigned long           addr;
+       int                     count;
+};
+
+static LIST_HEAD(ftrace_direct_funcs);
+
+static int register_ftrace_function_nolock(struct ftrace_ops *ops);
+
+/*
+ * If there are multiple ftrace_ops, use SAVE_REGS by default, so that the
+ * direct call will be jumped to from ftrace_regs_caller. Only if the
+ * architecture does not support ftrace_regs_caller but does support
+ * direct_call, use SAVE_ARGS so that it jumps from ftrace_caller for
+ * multiple ftrace_ops.
+ */
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
+#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
+#else
+#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
+#endif
+
+static int check_direct_multi(struct ftrace_ops *ops)
+{
+       if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
+               return -EINVAL;
+       if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
+               return -EINVAL;
+       return 0;
+}
+
+static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
+{
+       struct ftrace_func_entry *entry, *del;
+       int size, i;
+
+       size = 1 << hash->size_bits;
+       for (i = 0; i < size; i++) {
+               hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+                       del = __ftrace_lookup_ip(direct_functions, entry->ip);
+                       if (del && del->direct == addr) {
+                               remove_hash_entry(direct_functions, del);
+                               kfree(del);
+                       }
+               }
+       }
+}
+
+/**
+ * register_ftrace_direct - Call a custom trampoline directly
+ * for multiple functions registered in @ops
+ * @ops: The address of the struct ftrace_ops object
+ * @addr: The address of the trampoline to call at @ops functions
+ *
+ * This is used to connect direct calls to @addr from the nop locations
+ * of the functions registered in @ops (set via the ftrace_set_filter_ip()
+ * function).
+ *
+ * The location that it calls (@addr) must be able to handle a direct call,
+ * and save the parameters of the function being traced, and restore them
+ * (or inject new ones if needed), before returning.
+ *
+ * Returns:
+ *  0 on success
+ *  -EINVAL  - The @ops object was already registered with this call or
+ *             when there are no functions in @ops object.
+ *  -EBUSY   - Another direct function is already attached (there can be only one)
+ *  -ENODEV  - @ip does not point to a ftrace nop location (or not supported)
+ *  -ENOMEM  - There was an allocation failure.
+ */
+int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+{
+       struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL;
+       struct ftrace_func_entry *entry, *new;
+       int err = -EBUSY, size, i;
+
+       if (ops->func || ops->trampoline)
+               return -EINVAL;
+       if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
+               return -EINVAL;
+       if (ops->flags & FTRACE_OPS_FL_ENABLED)
+               return -EINVAL;
+
+       hash = ops->func_hash->filter_hash;
+       if (ftrace_hash_empty(hash))
+               return -EINVAL;
+
+       mutex_lock(&direct_mutex);
+
+       /* Make sure requested entries are not already registered. */
+       size = 1 << hash->size_bits;
+       for (i = 0; i < size; i++) {
+               hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+                       if (ftrace_find_rec_direct(entry->ip))
+                               goto out_unlock;
+               }
+       }
+
+       err = -ENOMEM;
+
+       /* Make a copy hash to place the new and the old entries in */
+       size = hash->count + direct_functions->count;
+       if (size > 32)
+               size = 32;
+       new_hash = alloc_ftrace_hash(fls(size));
+       if (!new_hash)
+               goto out_unlock;
+
+       /* Now copy over the existing direct entries */
+       size = 1 << direct_functions->size_bits;
+       for (i = 0; i < size; i++) {
+               hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) {
+                       new = add_hash_entry(new_hash, entry->ip);
+                       if (!new)
+                               goto out_unlock;
+                       new->direct = entry->direct;
+               }
+       }
+
+       /* ... and add the new entries */
+       size = 1 << hash->size_bits;
+       for (i = 0; i < size; i++) {
+               hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+                       new = add_hash_entry(new_hash, entry->ip);
+                       if (!new)
+                               goto out_unlock;
+                       /* Update both the copy and the hash entry */
+                       new->direct = addr;
+                       entry->direct = addr;
+               }
+       }
+
+       free_hash = direct_functions;
+       rcu_assign_pointer(direct_functions, new_hash);
+       new_hash = NULL;
+
+       ops->func = call_direct_funcs;
+       ops->flags = MULTI_FLAGS;
+       ops->trampoline = FTRACE_REGS_ADDR;
+       ops->direct_call = addr;
+
+       err = register_ftrace_function_nolock(ops);
+
+ out_unlock:
+       mutex_unlock(&direct_mutex);
+
+       if (free_hash && free_hash != EMPTY_HASH) {
+               synchronize_rcu_tasks();
+               free_ftrace_hash(free_hash);
+       }
+
+       if (new_hash)
+               free_ftrace_hash(new_hash);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(register_ftrace_direct);
+
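/*
 * Editorial sketch, not part of this patch: one way a module might use
 * register_ftrace_direct().  "my_tramp", "my_direct_ops" and the choice of
 * wake_up_process() as the attach point are assumptions for the example;
 * a real trampoline is written in assembly so it preserves the traced
 * function's arguments (see samples/ftrace/ for in-tree examples).
 */
static struct ftrace_ops my_direct_ops;

extern void my_tramp(void);	/* assumed asm trampoline */

static int __init my_direct_init(void)
{
	int ret;

	/* Select which function(s) the direct call attaches to */
	ret = ftrace_set_filter_ip(&my_direct_ops,
				   (unsigned long)wake_up_process, 0, 0);
	if (ret)
		return ret;

	/* Point each selected nop location at my_tramp */
	return register_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp);
}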
+/**
+ * unregister_ftrace_direct - Remove calls to custom trampoline
+ * previously registered by register_ftrace_direct for @ops object.
+ * @ops: The address of the struct ftrace_ops object
+ *
+ * This is used to remove direct calls to @addr from the nop locations
+ * of the functions registered in @ops (set via the ftrace_set_filter_ip()
+ * function).
+ *
+ * Returns:
+ *  0 on success
+ *  -EINVAL - The @ops object was not properly registered.
+ */
+int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+                            bool free_filters)
+{
+       struct ftrace_hash *hash = ops->func_hash->filter_hash;
+       int err;
+
+       if (check_direct_multi(ops))
+               return -EINVAL;
+       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+               return -EINVAL;
+
+       mutex_lock(&direct_mutex);
+       err = unregister_ftrace_function(ops);
+       remove_direct_functions_hash(hash, addr);
+       mutex_unlock(&direct_mutex);
+
+       /* cleanup for a possible later register call */
+       ops->func = NULL;
+       ops->trampoline = 0;
+
+       if (free_filters)
+               ftrace_free_filter(ops);
+       return err;
+}
+EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
+
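/*
 * Editorial sketch, not part of this patch: tearing the direct call back
 * down.  "my_direct_ops" and "my_tramp" are the assumed objects from the
 * registration sketch above; passing free_filters == true also releases the
 * hash that ftrace_set_filter_ip() allocated, so the ops can be reused or
 * freed afterwards.
 */
static void my_direct_exit(void)
{
	unregister_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp,
				 true /* free_filters */);
}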
+static int
+__modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
+{
+       struct ftrace_hash *hash;
+       struct ftrace_func_entry *entry, *iter;
+       static struct ftrace_ops tmp_ops = {
+               .func           = ftrace_stub,
+               .flags          = FTRACE_OPS_FL_STUB,
+       };
+       int i, size;
+       int err;
+
+       lockdep_assert_held_once(&direct_mutex);
+
+       /* Enable the tmp_ops to have the same functions as the direct ops */
+       ftrace_ops_init(&tmp_ops);
+       tmp_ops.func_hash = ops->func_hash;
+       tmp_ops.direct_call = addr;
+
+       err = register_ftrace_function_nolock(&tmp_ops);
+       if (err)
+               return err;
+
+       /*
+        * Now the ftrace_ops_list_func() is called to do the direct callers.
+        * We can safely change the direct functions attached to each entry.
+        */
+       mutex_lock(&ftrace_lock);
+
+       hash = ops->func_hash->filter_hash;
+       size = 1 << hash->size_bits;
+       for (i = 0; i < size; i++) {
+               hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
+                       entry = __ftrace_lookup_ip(direct_functions, iter->ip);
+                       if (!entry)
+                               continue;
+                       entry->direct = addr;
+               }
+       }
+       /* Prevent store tearing if a trampoline concurrently accesses the value */
+       WRITE_ONCE(ops->direct_call, addr);
+
+       mutex_unlock(&ftrace_lock);
+
+       /* Removing the tmp_ops will add the updated direct callers to the functions */
+       unregister_ftrace_function(&tmp_ops);
+
+       return err;
+}
+
+/**
+ * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
+ * to call something else
+ * @ops: The address of the struct ftrace_ops object
+ * @addr: The address of the new trampoline to call at @ops functions
+ *
+ * This is used to unregister currently registered direct caller and
+ * register new one @addr on functions registered in @ops object.
+ *
+ * Note there's a window between the ftrace_shutdown and ftrace_startup calls
+ * where no callbacks will be called.
+ *
+ * Caller should already have direct_mutex locked, so we don't lock
+ * direct_mutex here.
+ *
+ * Returns: zero on success. Non zero on error, which includes:
+ *  -EINVAL - The @ops object was not properly registered.
+ */
+int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
+{
+       if (check_direct_multi(ops))
+               return -EINVAL;
+       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+               return -EINVAL;
+
+       return __modify_ftrace_direct(ops, addr);
+}
+EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock);
+
+/**
+ * modify_ftrace_direct - Modify an existing direct 'multi' call
+ * to call something else
+ * @ops: The address of the struct ftrace_ops object
+ * @addr: The address of the new trampoline to call at @ops functions
+ *
+ * This is used to unregister currently registered direct caller and
+ * register new one @addr on functions registered in @ops object.
+ *
+ * Note there's a window between the ftrace_shutdown and ftrace_startup calls
+ * where no callbacks will be called.
+ *
+ * Returns: zero on success. Non zero on error, which includes:
+ *  -EINVAL - The @ops object was not properly registered.
+ */
+int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
 {
-       return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
+       int err;
+
+       if (check_direct_multi(ops))
+               return -EINVAL;
+       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+               return -EINVAL;
+
+       mutex_lock(&direct_mutex);
+       err = __modify_ftrace_direct(ops, addr);
+       mutex_unlock(&direct_mutex);
+       return err;
 }
+EXPORT_SYMBOL_GPL(modify_ftrace_direct);
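/*
 * Editorial sketch, not part of this patch: live-switching an already
 * registered direct ops to a different trampoline.  "my_tramp2" is an
 * assumed second asm trampoline and "my_direct_ops" is the ops from the
 * registration sketch above; while the switch is in progress the affected
 * call sites fall back to ftrace_ops_list_func() via the temporary stub ops
 * registered by __modify_ftrace_direct(), so updating each entry's direct
 * address stays safe.
 */
extern void my_tramp2(void);	/* assumed alternative trampoline */

static int my_direct_switch(void)
{
	return modify_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp2);
}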
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
 
 /**
  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
- * @ops - the ops to set the filter with
- * @ip - the address to add to or remove from the filter.
- * @remove - non zero to remove the ip from the filter
- * @reset - non zero to reset all filters before applying this filter.
+ * @ops: the ops to set the filter with
+ * @ip: the address to add to or remove from the filter.
+ * @remove: non zero to remove the ip from the filter
+ * @reset: non zero to reset all filters before applying this filter.
  *
  * Filters denote which functions should be enabled when tracing is enabled
- * If @ip is NULL, it failes to update filter.
+ * If @ip is NULL, it fails to update the filter.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
  */
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset)
 {
        ftrace_ops_init(ops);
-       return ftrace_set_addr(ops, ip, remove, reset, 1);
+       return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
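/*
 * Editorial sketch, not part of this patch: limiting an ordinary callback
 * ops to a single function by address.  "my_callback", "my_ops" and the use
 * of schedule() as the filtered target are assumptions for the example.
 */
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Runs only for the filtered ip once my_ops is registered */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static int my_filter_one(void)
{
	/* remove == 0, reset == 1: start from a clean filter */
	int ret = ftrace_set_filter_ip(&my_ops, (unsigned long)schedule, 0, 1);

	return ret ? ret : register_ftrace_function(&my_ops);
}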
 
+/**
+ * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
+ * @ops: the ops to set the filter with
+ * @ips: the array of addresses to add to or remove from the filter.
+ * @cnt: the number of addresses in @ips
+ * @remove: non zero to remove ips from the filter
+ * @reset: non zero to reset all filters before applying this filter.
+ *
+ * Filters denote which functions should be enabled when tracing is enabled
+ * If the @ips array or any ip specified within is NULL, it fails to update
+ * the filter.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
+ */
+int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+                         unsigned int cnt, int remove, int reset)
+{
+       ftrace_ops_init(ops);
+       return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
+}
+EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
+
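/*
 * Editorial sketch, not part of this patch: attaching the same ops to a
 * small batch of functions in one call.  "my_ops" is the assumed callback
 * ops from the earlier sketch and the chosen functions are arbitrary.
 */
static int my_filter_many(void)
{
	unsigned long ips[] = {
		(unsigned long)schedule,
		(unsigned long)wake_up_process,
	};

	/* remove == 0, reset == 1: replace any previous filter */
	return ftrace_set_filter_ips(&my_ops, ips, ARRAY_SIZE(ips), 0, 1);
}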
 /**
  * ftrace_ops_set_global_filter - setup ops to use global filters
- * @ops - the ops which will use the global filters
+ * @ops: the ops which will use the global filters
  *
  * ftrace users who need global function trace filtering should call this.
  * It can set the global filter only if ops were not initialized before.
@@ -4796,18 +5692,22 @@ static int
 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
                 int reset, int enable)
 {
-       return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
+       return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
 }
 
 /**
  * ftrace_set_filter - set a function to filter on in ftrace
- * @ops - the ops to set the filter with
- * @buf - the string that holds the function filter text.
- * @len - the length of the string.
- * @reset - non zero to reset all filters before applying this filter.
+ * @ops: the ops to set the filter with
+ * @buf: the string that holds the function filter text.
+ * @len: the length of the string.
+ * @reset: non-zero to reset all filters before applying this filter.
  *
  * Filters denote which functions should be enabled when tracing is enabled.
  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
  */
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset)
@@ -4819,14 +5719,18 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
 
 /**
  * ftrace_set_notrace - set a function to not trace in ftrace
- * @ops - the ops to set the notrace filter with
- * @buf - the string that holds the function notrace text.
- * @len - the length of the string.
- * @reset - non zero to reset all filters before applying this filter.
+ * @ops: the ops to set the notrace filter with
+ * @buf: the string that holds the function notrace text.
+ * @len: the length of the string.
+ * @reset: non-zero to reset all filters before applying this filter.
  *
  * Notrace Filters denote which functions should not be enabled when tracing
  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
  * for tracing.
+ *
+ * This can allocate memory which must be freed before @ops can be freed,
+ * either by removing each filtered addr or by using
+ * ftrace_free_filter(@ops).
  */
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset)
@@ -4837,9 +5741,9 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
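/*
 * Editorial sketch, not part of this patch: string/glob based filtering.
 * "my_ops" is the assumed callback ops from the earlier sketch; the globs
 * are arbitrary and only illustrate the buf/len/reset calling convention.
 */
static int my_filter_globs(void)
{
	char filter[] = "kmem_cache_*";
	char notrace[] = "kmem_cache_free";
	int ret;

	/* Trace every kmem_cache_* function, resetting previous filters */
	ret = ftrace_set_filter(&my_ops, filter, strlen(filter), 1);
	if (ret)
		return ret;

	/* ...but never trace kmem_cache_free() */
	return ftrace_set_notrace(&my_ops, notrace, strlen(notrace), 1);
}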
 /**
  * ftrace_set_global_filter - set a function to filter on with global tracers
- * @buf - the string that holds the function filter text.
- * @len - the length of the string.
- * @reset - non zero to reset all filters before applying this filter.
+ * @buf: the string that holds the function filter text.
+ * @len: the length of the string.
+ * @reset: non-zero to reset all filters before applying this filter.
  *
  * Filters denote which functions should be enabled when tracing is enabled.
  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
@@ -4852,9 +5756,9 @@ EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
 
 /**
  * ftrace_set_global_notrace - set a function to not trace with global tracers
- * @buf - the string that holds the function notrace text.
- * @len - the length of the string.
- * @reset - non zero to reset all filters before applying this filter.
+ * @buf: the string that holds the function notrace text.
+ * @len: the length of the string.
+ * @reset: non-zero to reset all filters before applying this filter.
  *
  * Notrace Filters denote which functions should not be enabled when tracing
  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
@@ -4879,7 +5783,7 @@ bool ftrace_filter_param __initdata;
 static int __init set_ftrace_notrace(char *str)
 {
        ftrace_filter_param = true;
-       strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+       strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
        return 1;
 }
 __setup("ftrace_notrace=", set_ftrace_notrace);
@@ -4887,7 +5791,7 @@ __setup("ftrace_notrace=", set_ftrace_notrace);
 static int __init set_ftrace_filter(char *str)
 {
        ftrace_filter_param = true;
-       strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+       strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
        return 1;
 }
 __setup("ftrace_filter=", set_ftrace_filter);
@@ -4899,14 +5803,14 @@ static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
 
 static int __init set_graph_function(char *str)
 {
-       strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
+       strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
        return 1;
 }
 __setup("ftrace_graph_filter=", set_graph_function);
 
 static int __init set_graph_notrace_function(char *str)
 {
-       strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
+       strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
        return 1;
 }
 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
@@ -4927,7 +5831,7 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
        struct ftrace_hash *hash;
 
        hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
-       if (WARN_ON(!hash))
+       if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
                return;
 
        while (buf) {
@@ -4980,7 +5884,6 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
        struct ftrace_hash **orig_hash;
        struct trace_parser *parser;
        int filter_hash;
-       int ret;
 
        if (file->f_mode & FMODE_READ) {
                iter = m->private;
@@ -4990,7 +5893,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        parser = &iter->parser;
        if (trace_parser_loaded(parser)) {
-               ftrace_match_records(iter->hash, parser->buffer, parser->idx);
+               int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
+
+               ftrace_process_regex(iter, parser->buffer,
+                                    parser->idx, enable);
        }
 
        trace_parser_put(parser);
@@ -5002,13 +5908,17 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
                if (filter_hash) {
                        orig_hash = &iter->ops->func_hash->filter_hash;
-                       if (iter->tr && !list_empty(&iter->tr->mod_trace))
-                               iter->hash->flags |= FTRACE_HASH_FL_MOD;
+                       if (iter->tr) {
+                               if (list_empty(&iter->tr->mod_trace))
+                                       iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
+                               else
+                                       iter->hash->flags |= FTRACE_HASH_FL_MOD;
+                       }
                } else
                        orig_hash = &iter->ops->func_hash->notrace_hash;
 
                mutex_lock(&ftrace_lock);
-               ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
+               ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
                                                      iter->hash, filter_hash);
                mutex_unlock(&ftrace_lock);
        } else {
@@ -5018,6 +5928,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        mutex_unlock(&iter->ops->func_hash->regex_lock);
        free_ftrace_hash(iter->hash);
+       if (iter->tr)
+               trace_array_put(iter->tr);
        kfree(iter);
 
        return 0;
@@ -5037,6 +5949,20 @@ static const struct file_operations ftrace_enabled_fops = {
        .release = seq_release_private,
 };
 
+static const struct file_operations ftrace_touched_fops = {
+       .open = ftrace_touched_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release_private,
+};
+
+static const struct file_operations ftrace_avail_addrs_fops = {
+       .open = ftrace_avail_addrs_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release_private,
+};
+
 static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = seq_read,
@@ -5057,8 +5983,8 @@ static const struct file_operations ftrace_notrace_fops = {
 
 static DEFINE_MUTEX(graph_lock);
 
-struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
-struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
+struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
+struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
 
 enum graph_filter_type {
        GRAPH_FILTER_NOTRACE    = 0,
@@ -5175,9 +6101,13 @@ static int
 __ftrace_graph_open(struct inode *inode, struct file *file,
                    struct ftrace_graph_data *fgd)
 {
-       int ret = 0;
+       int ret;
        struct ftrace_hash *new_hash = NULL;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        if (file->f_mode & FMODE_WRITE) {
                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
 
@@ -5329,8 +6259,16 @@ ftrace_graph_release(struct inode *inode, struct file *file)
 
                mutex_unlock(&graph_lock);
 
-               /* Wait till all users are no longer using the old hash */
-               synchronize_rcu();
+               /*
+                * We need to do a hard force of sched synchronization.
+                * This is because we use preempt_disable() to do RCU, but
+                * the function tracers can be called where RCU is not watching
+                * (like before user_exit()). We can not rely on the RCU
+                * infrastructure to do the synchronization, thus we must do it
+                * ourselves.
+                */
+               if (old_hash != EMPTY_HASH)
+                       synchronize_rcu_tasks_rude();
 
                free_ftrace_hash(old_hash);
        }
@@ -5378,7 +6316,7 @@ ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
 
                                if (entry)
                                        continue;
-                               if (add_hash_entry(hash, rec->ip) < 0)
+                               if (add_hash_entry(hash, rec->ip) == NULL)
                                        goto out;
                        } else {
                                if (entry) {
@@ -5453,10 +6391,10 @@ void ftrace_create_filter_files(struct ftrace_ops *ops,
                                struct dentry *parent)
 {
 
-       trace_create_file("set_ftrace_filter", 0644, parent,
+       trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
                          ops, &ftrace_filter_fops);
 
-       trace_create_file("set_ftrace_notrace", 0644, parent,
+       trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
                          ops, &ftrace_notrace_fops);
 }
 
@@ -5483,19 +6421,25 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
 {
 
-       trace_create_file("available_filter_functions", 0444,
+       trace_create_file("available_filter_functions", TRACE_MODE_READ,
                        d_tracer, NULL, &ftrace_avail_fops);
 
-       trace_create_file("enabled_functions", 0444,
+       trace_create_file("available_filter_functions_addrs", TRACE_MODE_READ,
+                       d_tracer, NULL, &ftrace_avail_addrs_fops);
+
+       trace_create_file("enabled_functions", TRACE_MODE_READ,
                        d_tracer, NULL, &ftrace_enabled_fops);
 
+       trace_create_file("touched_functions", TRACE_MODE_READ,
+                       d_tracer, NULL, &ftrace_touched_fops);
+
        ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       trace_create_file("set_graph_function", 0644, d_tracer,
+       trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
-       trace_create_file("set_graph_notrace", 0644, d_tracer,
+       trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
                                    NULL,
                                    &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -5515,13 +6459,36 @@ static int ftrace_cmp_ips(const void *a, const void *b)
        return 0;
 }
 
+#ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
+static void test_is_sorted(unsigned long *start, unsigned long count)
+{
+       int i;
+
+       for (i = 1; i < count; i++) {
+               if (WARN(start[i - 1] > start[i],
+                        "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
+                        (void *)start[i - 1], start[i - 1],
+                        (void *)start[i], start[i]))
+                       break;
+       }
+       if (i == count)
+               pr_info("ftrace section at %px sorted properly\n", start);
+}
+#else
+static void test_is_sorted(unsigned long *start, unsigned long count)
+{
+}
+#endif
+
 static int ftrace_process_locs(struct module *mod,
                               unsigned long *start,
                               unsigned long *end)
 {
+       struct ftrace_page *pg_unuse = NULL;
        struct ftrace_page *start_pg;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
+       unsigned long skipped = 0;
        unsigned long count;
        unsigned long *p;
        unsigned long addr;
@@ -5533,8 +6500,17 @@ static int ftrace_process_locs(struct module *mod,
        if (!count)
                return 0;
 
-       sort(start, count, sizeof(*start),
-            ftrace_cmp_ips, NULL);
+       /*
+        * Sorting mcount in vmlinux at build time depends on
+        * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
+        * modules cannot be sorted at build time.
+        */
+       if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
+               sort(start, count, sizeof(*start),
+                    ftrace_cmp_ips, NULL);
+       } else {
+               test_is_sorted(start, count);
+       }
 
        start_pg = ftrace_allocate_pages(count);
        if (!start_pg)
@@ -5567,6 +6543,7 @@ static int ftrace_process_locs(struct module *mod,
        p = start;
        pg = start_pg;
        while (p < end) {
+               unsigned long end_offset;
                addr = ftrace_call_adjust(*p++);
                /*
                 * Some architecture linkers will pad between
@@ -5574,10 +6551,13 @@ static int ftrace_process_locs(struct module *mod,
                 * object files to satisfy alignments.
                 * Skip any NULL pointers.
                 */
-               if (!addr)
+               if (!addr) {
+                       skipped++;
                        continue;
+               }
 
-               if (pg->index == pg->size) {
+               end_offset = (pg->index+1) * sizeof(pg->records[0]);
+               if (end_offset > PAGE_SIZE << pg->order) {
                        /* We should have allocated enough */
                        if (WARN_ON(!pg->next))
                                break;
@@ -5588,8 +6568,10 @@ static int ftrace_process_locs(struct module *mod,
                rec->ip = addr;
        }
 
-       /* We should have used all pages */
-       WARN_ON(pg->next);
+       if (pg->next) {
+               pg_unuse = pg->next;
+               pg->next = NULL;
+       }
 
        /* Assign the last page to ftrace_pages */
        ftrace_pages = pg;
@@ -5611,6 +6593,11 @@ static int ftrace_process_locs(struct module *mod,
  out:
        mutex_unlock(&ftrace_lock);
 
+       /* We should have used all pages unless we skipped some */
+       if (pg_unuse) {
+               WARN_ON(!skipped);
+               ftrace_free_pages(pg_unuse);
+       }
        return ret;
 }
 
@@ -5631,20 +6618,84 @@ struct ftrace_mod_map {
        unsigned int            num_funcs;
 };
 
-#ifdef CONFIG_MODULES
+static int ftrace_get_trampoline_kallsym(unsigned int symnum,
+                                        unsigned long *value, char *type,
+                                        char *name, char *module_name,
+                                        int *exported)
+{
+       struct ftrace_ops *op;
 
-#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
+       list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
+               if (!op->trampoline || symnum--)
+                       continue;
+               *value = op->trampoline;
+               *type = 't';
+               strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
+               strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
+               *exported = 0;
+               return 0;
+       }
 
-static LIST_HEAD(ftrace_mod_maps);
+       return -ERANGE;
+}
 
-static int referenced_filters(struct dyn_ftrace *rec)
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
+/*
+ * Check if the current ops references the given ip.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static bool
+ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
+{
+       /* If ops isn't enabled, ignore it */
+       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+               return false;
+
+       /* If ops traces all then it includes this function */
+       if (ops_traces_mod(ops))
+               return true;
+
+       /* The function must be in the filter */
+       if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+           !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
+               return false;
+
+       /* If in notrace hash, we ignore it too */
+       if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
+               return false;
+
+       return true;
+}
+#endif
+
+#ifdef CONFIG_MODULES
+
+#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
+
+static LIST_HEAD(ftrace_mod_maps);
+
+static int referenced_filters(struct dyn_ftrace *rec)
 {
        struct ftrace_ops *ops;
        int cnt = 0;
 
        for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
-               if (ops_references_rec(ops, rec))
-                   cnt++;
+               if (ops_references_ip(ops, rec->ip)) {
+                       if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
+                               continue;
+                       if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+                               continue;
+                       cnt++;
+                       if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
+                               rec->flags |= FTRACE_FL_REGS;
+                       if (cnt == 1 && ops->trampoline)
+                               rec->flags |= FTRACE_FL_TRAMP;
+                       else
+                               rec->flags &= ~FTRACE_FL_TRAMP;
+               }
        }
 
        return cnt;
@@ -5673,7 +6724,7 @@ clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
        }
 }
 
-/* Clear any records from hashs */
+/* Clear any records from hashes */
 static void clear_mod_from_hashes(struct ftrace_page *pg)
 {
        struct trace_array *tr;
@@ -5714,7 +6765,6 @@ void ftrace_release_mod(struct module *mod)
        struct ftrace_page **last_pg;
        struct ftrace_page *tmp_page = NULL;
        struct ftrace_page *pg;
-       int order;
 
        mutex_lock(&ftrace_lock);
 
@@ -5736,8 +6786,7 @@ void ftrace_release_mod(struct module *mod)
        last_pg = &ftrace_pages_start;
        for (pg = ftrace_pages_start; pg; pg = *last_pg) {
                rec = &pg->records[0];
-               if (within_module_core(rec->ip, mod) ||
-                   within_module_init(rec->ip, mod)) {
+               if (within_module(rec->ip, mod)) {
                        /*
                         * As core pages are first, the first
                         * page should never be a module page.
@@ -5765,10 +6814,13 @@ void ftrace_release_mod(struct module *mod)
                /* Needs to be called outside of ftrace_lock */
                clear_mod_from_hashes(pg);
 
-               order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (pg->records) {
+                       free_pages((unsigned long)pg->records, pg->order);
+                       ftrace_number_of_pages -= 1 << pg->order;
+               }
                tmp_page = pg->next;
                kfree(pg);
+               ftrace_number_of_groups--;
        }
 }
 
@@ -5806,10 +6858,16 @@ void ftrace_module_enable(struct module *mod)
                 * not part of this module, then skip this pg,
                 * which the "break" will do.
                 */
-               if (!within_module_core(rec->ip, mod) &&
-                   !within_module_init(rec->ip, mod))
+               if (!within_module(rec->ip, mod))
                        break;
 
+               /* Weak functions should still be ignored */
+               if (!test_for_valid_rec(rec)) {
+                       /* Clear all other flags. Should not be enabled anyway */
+                       rec->flags = FTRACE_FL_DISABLED;
+                       continue;
+               }
+
                cnt = 0;
 
                /*
@@ -5821,8 +6879,8 @@ void ftrace_module_enable(struct module *mod)
                if (ftrace_start_up)
                        cnt += referenced_filters(rec);
 
-               /* This clears FTRACE_FL_DISABLED */
-               rec->flags = cnt;
+               rec->flags &= ~FTRACE_FL_DISABLED;
+               rec->flags += cnt;
 
                if (ftrace_start_up && cnt) {
                        int failed = __ftrace_replace_code(rec, 1);
@@ -5846,11 +6904,16 @@ void ftrace_module_enable(struct module *mod)
 
 void ftrace_module_init(struct module *mod)
 {
+       int ret;
+
        if (ftrace_disabled || !mod->num_ftrace_callsites)
                return;
 
-       ftrace_process_locs(mod, mod->ftrace_callsites,
-                           mod->ftrace_callsites + mod->num_ftrace_callsites);
+       ret = ftrace_process_locs(mod, mod->ftrace_callsites,
+                                 mod->ftrace_callsites + mod->num_ftrace_callsites);
+       if (ret)
+               pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
+                       mod->name);
 }
 
 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
@@ -5929,7 +6992,7 @@ ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
                if (off)
                        *off = addr - found_func->ip;
                if (sym)
-                       strlcpy(sym, found_func->name, KSYM_NAME_LEN);
+                       strscpy(sym, found_func->name, KSYM_NAME_LEN);
 
                return found_func->name;
        }
@@ -5965,6 +7028,7 @@ int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
 {
        struct ftrace_mod_map *mod_map;
        struct ftrace_mod_func *mod_func;
+       int ret;
 
        preempt_disable();
        list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
@@ -5982,8 +7046,8 @@ int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
 
                        *value = mod_func->ip;
                        *type = 'T';
-                       strlcpy(name, mod_func->name, KSYM_NAME_LEN);
-                       strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
+                       strscpy(name, mod_func->name, KSYM_NAME_LEN);
+                       strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
                        *exported = 1;
                        preempt_enable();
                        return 0;
@@ -5991,8 +7055,10 @@ int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                WARN_ON(1);
                break;
        }
+       ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
+                                           module_name, exported);
        preempt_enable();
-       return -ERANGE;
+       return ret;
 }
 
 #else
@@ -6004,6 +7070,18 @@ allocate_ftrace_mod_map(struct module *mod,
 {
        return NULL;
 }
+int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
+                          char *type, char *name, char *module_name,
+                          int *exported)
+{
+       int ret;
+
+       preempt_disable();
+       ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
+                                           module_name, exported);
+       preempt_enable();
+       return ret;
+}
 #endif /* CONFIG_MODULES */
 
 struct ftrace_init_func {
@@ -6017,11 +7095,7 @@ clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
 {
        struct ftrace_func_entry *entry;
 
-       if (ftrace_hash_empty(hash))
-               return;
-
-       entry = __ftrace_lookup_ip(hash, func->ip);
-
+       entry = ftrace_lookup_ip(hash, func->ip);
        /*
         * Do not allow this rec to match again.
         * Yeah, it may waste some memory, but will be removed
@@ -6055,7 +7129,7 @@ static void add_to_clear_hash_list(struct list_head *clear_list,
 
        func = kmalloc(sizeof(*func), GFP_KERNEL);
        if (!func) {
-               WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
+               MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
                return;
        }
 
@@ -6073,10 +7147,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
        struct dyn_ftrace key;
        struct ftrace_mod_map *mod_map = NULL;
        struct ftrace_init_func *func, *func_next;
-       struct list_head clear_hash;
-       int order;
-
-       INIT_LIST_HEAD(&clear_hash);
+       LIST_HEAD(clear_hash);
 
        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */
@@ -6112,8 +7183,11 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
                ftrace_update_tot_cnt--;
                if (!pg->index) {
                        *last_pg = pg->next;
-                       order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-                       free_pages((unsigned long)pg->records, order);
+                       if (pg->records) {
+                               free_pages((unsigned long)pg->records, pg->order);
+                               ftrace_number_of_pages -= 1 << pg->order;
+                       }
+                       ftrace_number_of_groups--;
                        kfree(pg);
                        pg = container_of(last_pg, struct ftrace_page, next);
                        if (!(*last_pg))
@@ -6138,9 +7212,16 @@ void __init ftrace_free_init_mem(void)
        void *start = (void *)(&__init_begin);
        void *end = (void *)(&__init_end);
 
+       ftrace_boot_snapshot();
+
        ftrace_free_mem(NULL, start, end);
 }
 
+int __init __weak ftrace_dyn_arch_init(void)
+{
+       return 0;
+}
+
 void __init ftrace_init(void)
 {
        extern unsigned long __start_mcount_loc[];
@@ -6161,13 +7242,20 @@ void __init ftrace_init(void)
        }
 
        pr_info("ftrace: allocating %ld entries in %ld pages\n",
-               count, count / ENTRIES_PER_PAGE + 1);
-
-       last_ftrace_enabled = ftrace_enabled = 1;
+               count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
 
        ret = ftrace_process_locs(NULL,
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
+       if (ret) {
+               pr_warn("ftrace: failed to allocate entries for functions\n");
+               goto failed;
+       }
+
+       pr_info("ftrace: allocated %ld pages with %ld groups\n",
+               ftrace_number_of_pages, ftrace_number_of_groups);
+
+       last_ftrace_enabled = ftrace_enabled = 1;
 
        set_ftrace_early_filters();
 
@@ -6183,7 +7271,24 @@ void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
+       unsigned long trampoline = ops->trampoline;
+
        arch_ftrace_update_trampoline(ops);
+       if (ops->trampoline && ops->trampoline != trampoline &&
+           (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
+               /* Add to kallsyms before the perf events */
+               ftrace_add_trampoline_to_kallsyms(ops);
+               perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
+                                  ops->trampoline, ops->trampoline_size, false,
+                                  FTRACE_TRAMPOLINE_SYM);
+               /*
+                * Record the perf text poke event after the ksymbol register
+                * event.
+                */
+               perf_event_text_poke((void *)ops->trampoline, NULL, 0,
+                                    (void *)ops->trampoline,
+                                    ops->trampoline_size);
+       }
 }
 
 void ftrace_init_trace_array(struct trace_array *tr)
@@ -6196,8 +7301,7 @@ void ftrace_init_trace_array(struct trace_array *tr)
 
 struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
-                                 FTRACE_OPS_FL_INITIALIZED |
+       .flags                  = FTRACE_OPS_FL_INITIALIZED |
                                  FTRACE_OPS_FL_PID,
 };
 
@@ -6209,12 +7313,8 @@ static int __init ftrace_nodyn_init(void)
 core_initcall(ftrace_nodyn_init);
 
 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
-static inline void ftrace_startup_enable(int command) { }
 static inline void ftrace_startup_all(int command) { }
 
-# define ftrace_startup_sysctl()       do { } while (0)
-# define ftrace_shutdown_sysctl()      do { } while (0)
-
 static void ftrace_update_trampoline(struct ftrace_ops *ops)
 {
 }
@@ -6247,20 +7347,20 @@ void ftrace_reset_array_ops(struct trace_array *tr)
 
 static nokprobe_inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
-                      struct ftrace_ops *ignored, struct pt_regs *regs)
+                      struct ftrace_ops *ignored, struct ftrace_regs *fregs)
 {
+       struct pt_regs *regs = ftrace_get_regs(fregs);
        struct ftrace_ops *op;
        int bit;
 
-       bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
-       if (bit < 0)
-               return;
-
        /*
-        * Some of the ops may be dynamically allocated,
-        * they must be freed after a synchronize_rcu().
+        * The trace_test_and_set_recursion() call will disable preemption,
+        * which is required since some of the ops may be dynamically
+        * allocated; they must be freed after a synchronize_rcu().
         */
-       preempt_disable_notrace();
+       bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
+       if (bit < 0)
+               return;
 
        do_for_each_ftrace_op(op, ftrace_ops_list) {
                /* Stub functions don't need to be called nor tested */
@@ -6269,8 +7369,6 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                /*
                 * Check the following for each ops before calling their func:
                 *  if RCU flag is set, then rcu_is_watching() must be true
-                *  if PER_CPU is set, then ftrace_function_local_disable()
-                *                          must be false
                 *  Otherwise test if the ip matches the ops filter
                 *
                 * If any of the above fails then the op->func() is not executed.
@@ -6281,11 +7379,10 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                pr_warn("op=%p %pS\n", op, op);
                                goto out;
                        }
-                       op->func(ip, parent_ip, op, regs);
+                       op->func(ip, parent_ip, op, fregs);
                }
        } while_for_each_ftrace_op(op);
 out:
-       preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
 
@@ -6301,44 +7398,41 @@ out:
  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
  * An architecture can pass partial regs with ftrace_ops and still
  * set the ARCH_SUPPORTS_FTRACE_OPS.
+ *
+ * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
+ * arch_ftrace_ops_list_func.
  */
 #if ARCH_SUPPORTS_FTRACE_OPS
-static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
-                                struct ftrace_ops *op, struct pt_regs *regs)
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+                              struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
-       __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
+       __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
 }
-NOKPROBE_SYMBOL(ftrace_ops_list_func);
 #else
-static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
-NOKPROBE_SYMBOL(ftrace_ops_no_ops);
 #endif
+NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
 
 /*
  * If there's only one function registered but it does not support
- * recursion, needs RCU protection and/or requires per cpu handling, then
- * this function will be called by the mcount trampoline.
+ * recursion or needs RCU protection, then this function will be
+ * called by the mcount trampoline.
  */
 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
-                                  struct ftrace_ops *op, struct pt_regs *regs)
+                                  struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
        int bit;
 
-       if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
-               return;
-
-       bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+       bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
        if (bit < 0)
                return;
 
-       preempt_disable_notrace();
-
-       op->func(ip, parent_ip, op, regs);
+       if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
+               op->func(ip, parent_ip, op, fregs);
 
-       preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
 NOKPROBE_SYMBOL(ftrace_ops_assist_func);
@@ -6352,16 +7446,16 @@ NOKPROBE_SYMBOL(ftrace_ops_assist_func);
  * have its own recursion protection, then it should call the
  * ftrace_ops_assist_func() instead.
  *
- * Returns the function that the trampoline should call for @ops.
+ * Returns: the function that the trampoline should call for @ops.
  */
 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
 {
        /*
-        * If the function does not handle recursion, needs to be RCU safe,
-        * or does per cpu logic, then we need to call the assist handler.
+        * If the function does not handle recursion or needs to be RCU safe,
+        * then we need to call the assist handler.
         */
-       if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
-           ops->flags & FTRACE_OPS_FL_RCU)
+       if (ops->flags & (FTRACE_OPS_FL_RECURSION |
+                         FTRACE_OPS_FL_RCU))
                return ftrace_ops_assist_func;
 
        return ops->func;
@@ -6369,15 +7463,23 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
 
 static void
 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
-                   struct task_struct *prev, struct task_struct *next)
+                                    struct task_struct *prev,
+                                    struct task_struct *next,
+                                    unsigned int prev_state)
 {
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;
+       struct trace_pid_list *no_pid_list;
 
        pid_list = rcu_dereference_sched(tr->function_pids);
+       no_pid_list = rcu_dereference_sched(tr->function_no_pids);
 
-       this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
-                      trace_ignore_this_task(pid_list, next));
+       if (trace_ignore_this_task(pid_list, no_pid_list, next))
+               this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+                              FTRACE_PID_IGNORE);
+       else
+               this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+                              next->pid);
 }
 
 static void
@@ -6390,6 +7492,9 @@ ftrace_pid_follow_sched_process_fork(void *data,
 
        pid_list = rcu_dereference_sched(tr->function_pids);
        trace_filter_add_remove_task(pid_list, self, task);
+
+       pid_list = rcu_dereference_sched(tr->function_no_pids);
+       trace_filter_add_remove_task(pid_list, self, task);
 }
 
 static void
@@ -6400,6 +7505,9 @@ ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
 
        pid_list = rcu_dereference_sched(tr->function_pids);
        trace_filter_add_remove_task(pid_list, NULL, task);
+
+       pid_list = rcu_dereference_sched(tr->function_no_pids);
+       trace_filter_add_remove_task(pid_list, NULL, task);
 }
 
 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
@@ -6407,52 +7515,67 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
        if (enable) {
                register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
                                                  tr);
-               register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
+               register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
                                                  tr);
        } else {
                unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
                                                    tr);
-               unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
+               unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
                                                    tr);
        }
 }
 
-static void clear_ftrace_pids(struct trace_array *tr)
+static void clear_ftrace_pids(struct trace_array *tr, int type)
 {
        struct trace_pid_list *pid_list;
+       struct trace_pid_list *no_pid_list;
        int cpu;
 
        pid_list = rcu_dereference_protected(tr->function_pids,
                                             lockdep_is_held(&ftrace_lock));
-       if (!pid_list)
+       no_pid_list = rcu_dereference_protected(tr->function_no_pids,
+                                               lockdep_is_held(&ftrace_lock));
+
+       /* Make sure there's something to do */
+       if (!pid_type_enabled(type, pid_list, no_pid_list))
                return;
 
-       unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
+       /* See if the pids still need to be checked after this */
+       if (!still_need_pid_events(type, pid_list, no_pid_list)) {
+               unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
+               for_each_possible_cpu(cpu)
+                       per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
+       }
 
-       for_each_possible_cpu(cpu)
-               per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
+       if (type & TRACE_PIDS)
+               rcu_assign_pointer(tr->function_pids, NULL);
 
-       rcu_assign_pointer(tr->function_pids, NULL);
+       if (type & TRACE_NO_PIDS)
+               rcu_assign_pointer(tr->function_no_pids, NULL);
 
        /* Wait till all users are no longer using pid filtering */
        synchronize_rcu();
 
-       trace_free_pid_list(pid_list);
+       if ((type & TRACE_PIDS) && pid_list)
+               trace_pid_list_free(pid_list);
+
+       if ((type & TRACE_NO_PIDS) && no_pid_list)
+               trace_pid_list_free(no_pid_list);
 }
 
 void ftrace_clear_pids(struct trace_array *tr)
 {
        mutex_lock(&ftrace_lock);
 
-       clear_ftrace_pids(tr);
+       clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
 
        mutex_unlock(&ftrace_lock);
 }
 
-static void ftrace_pid_reset(struct trace_array *tr)
+static void ftrace_pid_reset(struct trace_array *tr, int type)
 {
        mutex_lock(&ftrace_lock);
-       clear_ftrace_pids(tr);
+       clear_ftrace_pids(tr, type);
 
        ftrace_update_pid_func();
        ftrace_startup_all(0);
@@ -6485,9 +7608,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
        struct trace_array *tr = m->private;
        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
 
-       if (v == FTRACE_NO_PIDS)
+       if (v == FTRACE_NO_PIDS) {
+               (*pos)++;
                return NULL;
-
+       }
        return trace_pid_next(pid_list, v, pos);
 }
 
@@ -6515,21 +7639,71 @@ static const struct seq_operations ftrace_pid_sops = {
        .show = fpid_show,
 };
 
-static int
-ftrace_pid_open(struct inode *inode, struct file *file)
+static void *fnpid_start(struct seq_file *m, loff_t *pos)
+       __acquires(RCU)
 {
+       struct trace_pid_list *pid_list;
+       struct trace_array *tr = m->private;
+
+       mutex_lock(&ftrace_lock);
+       rcu_read_lock_sched();
+
+       pid_list = rcu_dereference_sched(tr->function_no_pids);
+
+       if (!pid_list)
+               return !(*pos) ? FTRACE_NO_PIDS : NULL;
+
+       return trace_pid_start(pid_list, pos);
+}
+
+static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct trace_array *tr = m->private;
+       struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
+
+       if (v == FTRACE_NO_PIDS) {
+               (*pos)++;
+               return NULL;
+       }
+       return trace_pid_next(pid_list, v, pos);
+}
+
+static const struct seq_operations ftrace_no_pid_sops = {
+       .start = fnpid_start,
+       .next = fnpid_next,
+       .stop = fpid_stop,
+       .show = fpid_show,
+};
+
+static int pid_open(struct inode *inode, struct file *file, int type)
+{
+       const struct seq_operations *seq_ops;
        struct trace_array *tr = inode->i_private;
        struct seq_file *m;
        int ret = 0;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
-               ftrace_pid_reset(tr);
+               ftrace_pid_reset(tr, type);
 
-       ret = seq_open(file, &ftrace_pid_sops);
+       switch (type) {
+       case TRACE_PIDS:
+               seq_ops = &ftrace_pid_sops;
+               break;
+       case TRACE_NO_PIDS:
+               seq_ops = &ftrace_no_pid_sops;
+               break;
+       default:
+               trace_array_put(tr);
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
+
+       ret = seq_open(file, seq_ops);
        if (ret < 0) {
                trace_array_put(tr);
        } else {
@@ -6541,10 +7715,23 @@ ftrace_pid_open(struct inode *inode, struct file *file)
        return ret;
 }
 
+static int
+ftrace_pid_open(struct inode *inode, struct file *file)
+{
+       return pid_open(inode, file, TRACE_PIDS);
+}
+
+static int
+ftrace_no_pid_open(struct inode *inode, struct file *file)
+{
+       return pid_open(inode, file, TRACE_NO_PIDS);
+}
+
 static void ignore_task_cpu(void *data)
 {
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;
+       struct trace_pid_list *no_pid_list;
 
        /*
         * This function is called by on_each_cpu() while the
@@ -6552,18 +7739,25 @@ static void ignore_task_cpu(void *data)
         */
        pid_list = rcu_dereference_protected(tr->function_pids,
                                             mutex_is_locked(&ftrace_lock));
+       no_pid_list = rcu_dereference_protected(tr->function_no_pids,
+                                               mutex_is_locked(&ftrace_lock));
 
-       this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
-                      trace_ignore_this_task(pid_list, current));
+       if (trace_ignore_this_task(pid_list, no_pid_list, current))
+               this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+                              FTRACE_PID_IGNORE);
+       else
+               this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
+                              current->pid);
 }
 
 static ssize_t
-ftrace_pid_write(struct file *filp, const char __user *ubuf,
-                  size_t cnt, loff_t *ppos)
+pid_write(struct file *filp, const char __user *ubuf,
+         size_t cnt, loff_t *ppos, int type)
 {
        struct seq_file *m = filp->private_data;
        struct trace_array *tr = m->private;
-       struct trace_pid_list *filtered_pids = NULL;
+       struct trace_pid_list *filtered_pids;
+       struct trace_pid_list *other_pids;
        struct trace_pid_list *pid_list;
        ssize_t ret;
 
@@ -6572,19 +7766,43 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
 
        mutex_lock(&ftrace_lock);
 
-       filtered_pids = rcu_dereference_protected(tr->function_pids,
+       switch (type) {
+       case TRACE_PIDS:
+               filtered_pids = rcu_dereference_protected(tr->function_pids,
+                                            lockdep_is_held(&ftrace_lock));
+               other_pids = rcu_dereference_protected(tr->function_no_pids,
+                                            lockdep_is_held(&ftrace_lock));
+               break;
+       case TRACE_NO_PIDS:
+               filtered_pids = rcu_dereference_protected(tr->function_no_pids,
+                                            lockdep_is_held(&ftrace_lock));
+               other_pids = rcu_dereference_protected(tr->function_pids,
                                             lockdep_is_held(&ftrace_lock));
+               break;
+       default:
+               ret = -EINVAL;
+               WARN_ON_ONCE(1);
+               goto out;
+       }
 
        ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
        if (ret < 0)
                goto out;
 
-       rcu_assign_pointer(tr->function_pids, pid_list);
+       switch (type) {
+       case TRACE_PIDS:
+               rcu_assign_pointer(tr->function_pids, pid_list);
+               break;
+       case TRACE_NO_PIDS:
+               rcu_assign_pointer(tr->function_no_pids, pid_list);
+               break;
+       }
+
 
        if (filtered_pids) {
                synchronize_rcu();
-               trace_free_pid_list(filtered_pids);
-       } else if (pid_list) {
+               trace_pid_list_free(filtered_pids);
+       } else if (pid_list && !other_pids) {
                /* Register a probe to set whether to ignore the tracing of a task */
                register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
        }
@@ -6607,6 +7825,20 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
        return ret;
 }
 
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+                size_t cnt, loff_t *ppos)
+{
+       return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
+}
+
+static ssize_t
+ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
+                   size_t cnt, loff_t *ppos)
+{
+       return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
+}
+
 static int
 ftrace_pid_release(struct inode *inode, struct file *file)
 {
@@ -6625,10 +7857,20 @@ static const struct file_operations ftrace_pid_fops = {
        .release        = ftrace_pid_release,
 };
 
+static const struct file_operations ftrace_no_pid_fops = {
+       .open           = ftrace_no_pid_open,
+       .write          = ftrace_no_pid_write,
+       .read           = seq_read,
+       .llseek         = tracing_lseek,
+       .release        = ftrace_pid_release,
+};
+
 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 {
-       trace_create_file("set_ftrace_pid", 0644, d_tracer,
+       trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
                            tr, &ftrace_pid_fops);
+       trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
+                         d_tracer, tr, &ftrace_no_pid_fops);
 }
 
 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
@@ -6656,16 +7898,155 @@ void ftrace_kill(void)
 }
 
 /**
- * Test if ftrace is dead or not.
+ * ftrace_is_dead - Test if ftrace is dead or not.
+ *
+ * Returns: 1 if ftrace is "dead", zero otherwise.
  */
 int ftrace_is_dead(void)
 {
        return ftrace_disabled;
 }
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+/*
+ * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
+ * it doesn't conflict with any direct ftrace_ops. If there is an existing
+ * direct ftrace_ops on a kernel function being patched, call
+ * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
+ *
+ * @ops:     ftrace_ops being registered.
+ *
+ * Returns:
+ *         0 on success;
+ *         Negative on failure.
+ */
+static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
+{
+       struct ftrace_func_entry *entry;
+       struct ftrace_hash *hash;
+       struct ftrace_ops *op;
+       int size, i, ret;
+
+       lockdep_assert_held_once(&direct_mutex);
+
+       if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+               return 0;
+
+       hash = ops->func_hash->filter_hash;
+       size = 1 << hash->size_bits;
+       for (i = 0; i < size; i++) {
+               hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+                       unsigned long ip = entry->ip;
+                       bool found_op = false;
+
+                       mutex_lock(&ftrace_lock);
+                       do_for_each_ftrace_op(op, ftrace_ops_list) {
+                               if (!(op->flags & FTRACE_OPS_FL_DIRECT))
+                                       continue;
+                               if (ops_references_ip(op, ip)) {
+                                       found_op = true;
+                                       break;
+                               }
+                       } while_for_each_ftrace_op(op);
+                       mutex_unlock(&ftrace_lock);
+
+                       if (found_op) {
+                               if (!op->ops_func)
+                                       return -EBUSY;
+
+                               ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
+                               if (ret)
+                                       return ret;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Similar to prepare_direct_functions_for_ipmodify(), this cleans up after
+ * an ops with IPMODIFY is unregistered. The cleanup is optional for most
+ * DIRECT ops.
+ */
+static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
+{
+       struct ftrace_func_entry *entry;
+       struct ftrace_hash *hash;
+       struct ftrace_ops *op;
+       int size, i;
+
+       if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+               return;
+
+       mutex_lock(&direct_mutex);
+
+       hash = ops->func_hash->filter_hash;
+       size = 1 << hash->size_bits;
+       for (i = 0; i < size; i++) {
+               hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
+                       unsigned long ip = entry->ip;
+                       bool found_op = false;
+
+                       mutex_lock(&ftrace_lock);
+                       do_for_each_ftrace_op(op, ftrace_ops_list) {
+                               if (!(op->flags & FTRACE_OPS_FL_DIRECT))
+                                       continue;
+                               if (ops_references_ip(op, ip)) {
+                                       found_op = true;
+                                       break;
+                               }
+                       } while_for_each_ftrace_op(op);
+                       mutex_unlock(&ftrace_lock);
+
+                       /* The cleanup is optional, ignore any errors */
+                       if (found_op && op->ops_func)
+                               op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
+               }
+       }
+       mutex_unlock(&direct_mutex);
+}
+
+#define lock_direct_mutex()    mutex_lock(&direct_mutex)
+#define unlock_direct_mutex()  mutex_unlock(&direct_mutex)
+
+#else  /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
+static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
+{
+       return 0;
+}
+
+static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
+{
+}
+
+#define lock_direct_mutex()    do { } while (0)
+#define unlock_direct_mutex()  do { } while (0)
+
+#endif  /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
+/*
+ * Similar to register_ftrace_function, except we don't lock direct_mutex.
+ */
+static int register_ftrace_function_nolock(struct ftrace_ops *ops)
+{
+       int ret;
+
+       ftrace_ops_init(ops);
+
+       mutex_lock(&ftrace_lock);
+
+       ret = ftrace_startup(ops, 0);
+
+       mutex_unlock(&ftrace_lock);
+
+       return ret;
+}
+
 /**
  * register_ftrace_function - register a function for profiling
- * @ops - ops structure that holds the function for profiling.
+ * @ops:       ops structure that holds the function for profiling.
  *
  * Register a function to be called by all functions in the
  * kernel.
@@ -6676,23 +8057,24 @@ int ftrace_is_dead(void)
  */
 int register_ftrace_function(struct ftrace_ops *ops)
 {
-       int ret = -1;
-
-       ftrace_ops_init(ops);
-
-       mutex_lock(&ftrace_lock);
+       int ret;
 
-       ret = ftrace_startup(ops, 0);
+       lock_direct_mutex();
+       ret = prepare_direct_functions_for_ipmodify(ops);
+       if (ret < 0)
+               goto out_unlock;
 
-       mutex_unlock(&ftrace_lock);
+       ret = register_ftrace_function_nolock(ops);
 
+out_unlock:
+       unlock_direct_mutex();
        return ret;
 }
 EXPORT_SYMBOL_GPL(register_ftrace_function);
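/*
 * A minimal usage sketch of the registration API above, assuming a
 * hypothetical out-of-tree module that traces calls to schedule(). The
 * callback prototype matches ftrace_func_t as used in this file; the
 * names my_trace_callback/my_trace_ops are illustrative only.
 */
#include <linux/ftrace.h>
#include <linux/module.h>

static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        /* Keep this cheap: it may run for every traced function call. */
}

static struct ftrace_ops my_trace_ops = {
        .func = my_trace_callback,
};

static int __init my_trace_init(void)
{
        /* Restrict tracing to schedule(); with no filter, all functions are traced. */
        ftrace_set_filter(&my_trace_ops, "schedule", strlen("schedule"), 0);
        return register_ftrace_function(&my_trace_ops);
}

static void __exit my_trace_exit(void)
{
        unregister_ftrace_function(&my_trace_ops);
}

module_init(my_trace_init);
module_exit(my_trace_exit);
MODULE_LICENSE("GPL");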
 
 /**
  * unregister_ftrace_function - unregister a function for profiling.
- * @ops - ops structure that holds the function to unregister
+ * @ops:       ops structure that holds the function to unregister
  *
  * Unregister a function that was added to be called by ftrace profiling.
  */
@@ -6704,14 +8086,141 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
        ret = ftrace_shutdown(ops, 0);
        mutex_unlock(&ftrace_lock);
 
+       cleanup_direct_functions_after_ipmodify(ops);
        return ret;
 }
 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
 
-int
+static int symbols_cmp(const void *a, const void *b)
+{
+       const char **str_a = (const char **) a;
+       const char **str_b = (const char **) b;
+
+       return strcmp(*str_a, *str_b);
+}
+
+struct kallsyms_data {
+       unsigned long *addrs;
+       const char **syms;
+       size_t cnt;
+       size_t found;
+};
+
+/*
+ * This function gets called for all kernel and module symbols
+ * and returns 1 once all the requested symbols have been resolved,
+ * 0 otherwise.
+ */
+static int kallsyms_callback(void *data, const char *name, unsigned long addr)
+{
+       struct kallsyms_data *args = data;
+       const char **sym;
+       int idx;
+
+       sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
+       if (!sym)
+               return 0;
+
+       idx = sym - args->syms;
+       if (args->addrs[idx])
+               return 0;
+
+       if (!ftrace_location(addr))
+               return 0;
+
+       args->addrs[idx] = addr;
+       args->found++;
+       return args->found == args->cnt ? 1 : 0;
+}
+
+/**
+ * ftrace_lookup_symbols - Lookup addresses for array of symbols
+ *
+ * @sorted_syms: array of pointers to the symbols to resolve,
+ * must be alphabetically sorted
+ * @cnt: number of symbols/addresses in the @sorted_syms/@addrs arrays
+ * @addrs: array for storing the resulting addresses
+ *
+ * This function looks up addresses for the symbols provided in the
+ * @sorted_syms array (which must be alphabetically sorted) and stores them
+ * in the @addrs array, which needs to be big enough to hold at least @cnt
+ * addresses.
+ *
+ * Returns: 0 if all provided symbols are found, -ESRCH otherwise.
+ */
+int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
+{
+       struct kallsyms_data args;
+       int found_all;
+
+       memset(addrs, 0, sizeof(*addrs) * cnt);
+       args.addrs = addrs;
+       args.syms = sorted_syms;
+       args.cnt = cnt;
+       args.found = 0;
+
+       found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
+       if (found_all)
+               return 0;
+       found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
+       return found_all ? 0 : -ESRCH;
+}
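
/*
 * A minimal sketch of resolving a couple of symbols with
 * ftrace_lookup_symbols(), assuming the caller has already sorted its
 * array alphabetically (in-tree users sort with a strcmp()-based
 * comparator). The function and symbol names below are illustrative only.
 */
static int resolve_two_symbols_example(void)
{
        const char *syms[] = { "do_exit", "schedule" };  /* alphabetical */
        unsigned long addrs[ARRAY_SIZE(syms)];
        int err;

        err = ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
        if (err)
                return err;     /* -ESRCH if any symbol was not resolved */

        pr_info("do_exit=%px schedule=%px\n",
                (void *)addrs[0], (void *)addrs[1]);
        return 0;
}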
+
+#ifdef CONFIG_SYSCTL
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static void ftrace_startup_sysctl(void)
+{
+       int command;
+
+       if (unlikely(ftrace_disabled))
+               return;
+
+       /* Force update next time */
+       saved_ftrace_func = NULL;
+       /* ftrace_start_up is true if we want ftrace running */
+       if (ftrace_start_up) {
+               command = FTRACE_UPDATE_CALLS;
+               if (ftrace_graph_active)
+                       command |= FTRACE_START_FUNC_RET;
+               ftrace_startup_enable(command);
+       }
+}
+
+static void ftrace_shutdown_sysctl(void)
+{
+       int command;
+
+       if (unlikely(ftrace_disabled))
+               return;
+
+       /* ftrace_start_up is true if ftrace is running */
+       if (ftrace_start_up) {
+               command = FTRACE_DISABLE_CALLS;
+               if (ftrace_graph_active)
+                       command |= FTRACE_STOP_FUNC_RET;
+               ftrace_run_update_code(command);
+       }
+}
+#else
+# define ftrace_startup_sysctl()       do { } while (0)
+# define ftrace_shutdown_sysctl()      do { } while (0)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+static bool is_permanent_ops_registered(void)
+{
+       struct ftrace_ops *op;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->flags & FTRACE_OPS_FL_PERMANENT)
+                       return true;
+       } while_for_each_ftrace_op(op);
+
+       return false;
+}
+
+static int
 ftrace_enable_sysctl(struct ctl_table *table, int write,
-                    void __user *buffer, size_t *lenp,
-                    loff_t *ppos)
+                    void *buffer, size_t *lenp, loff_t *ppos)
 {
        int ret = -ENODEV;
 
@@ -6725,8 +8234,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
                goto out;
 
-       last_ftrace_enabled = !!ftrace_enabled;
-
        if (ftrace_enabled) {
 
                /* we are starting ftrace again */
@@ -6737,13 +8244,39 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
                ftrace_startup_sysctl();
 
        } else {
+               if (is_permanent_ops_registered()) {
+                       ftrace_enabled = true;
+                       ret = -EBUSY;
+                       goto out;
+               }
+
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;
 
                ftrace_shutdown_sysctl();
        }
 
+       last_ftrace_enabled = !!ftrace_enabled;
  out:
        mutex_unlock(&ftrace_lock);
        return ret;
 }
+
+static struct ctl_table ftrace_sysctls[] = {
+       {
+               .procname       = "ftrace_enabled",
+               .data           = &ftrace_enabled,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = ftrace_enable_sysctl,
+       },
+       {}
+};
+
+static int __init ftrace_sysctl_init(void)
+{
+       register_sysctl_init("kernel", ftrace_sysctls);
+       return 0;
+}
+late_initcall(ftrace_sysctl_init);
+#endif
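
/*
 * The ftrace_enable_sysctl() handler above runs when the
 * "kernel.ftrace_enabled" sysctl is written. A small userspace sketch of
 * turning it off via procfs; the write fails with EBUSY while a PERMANENT
 * ops (e.g. one registered by live patching) is active.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/sys/kernel/ftrace_enabled", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "0", 1) < 0)
                perror("write");        /* EBUSY if a permanent ops is registered */
        close(fd);
        return 0;
}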