Merge tag 'trace-v5.13-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3ba52d4e1314228147112047aa497709dbe6ba59..2e8a3fde710446e52cf48d593e453da0bf5b057b 100644
@@ -1045,7 +1045,7 @@ struct ftrace_ops global_ops = {
 };
 
 /*
- * Used by the stack undwinder to know about dynamic ftrace trampolines.
+ * Used by the stack unwinder to know about dynamic ftrace trampolines.
  */
 struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 {
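For context, the unwinder-facing wrapper built on this helper sits in the same file; a hedged sketch of that relationship (body paraphrased from memory, not part of this commit):

	/* An address belongs to a dynamic trampoline iff some
	 * registered ftrace_ops owns it. */
	bool is_ftrace_trampoline(unsigned long addr)
	{
		return ftrace_ops_trampoline(addr) != NULL;
	}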
@@ -1090,7 +1090,7 @@ struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
-       int                     size;
+       int                     order;
 };
 
 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
@@ -3000,7 +3000,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
                 * When the kernel is preemptive, tasks can be preempted
                 * while on a ftrace trampoline. Just scheduling a task on
                 * a CPU is not good enough to flush them. Calling
-                * synchornize_rcu_tasks() will wait for those tasks to
+                * synchronize_rcu_tasks() will wait for those tasks to
                 * execute and either schedule voluntarily or enter user space.
                 */
                if (IS_ENABLED(CONFIG_PREEMPTION))
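The pattern the comment describes, as a hedged sketch (release_trampoline is an illustrative name; arch_ftrace_trampoline_free() is the arch hook this file calls, assumed here):

	static void release_trampoline(struct ftrace_ops *ops)
	{
		/*
		 * A preempted task may still be mid-flight on the
		 * trampoline. synchronize_rcu_tasks() returns only after
		 * every task has voluntarily scheduled or entered user
		 * space, so the trampoline pages can be freed afterwards.
		 */
		if (IS_ENABLED(CONFIG_PREEMPTION))
			synchronize_rcu_tasks();
		arch_ftrace_trampoline_free(ops);
	}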
@@ -3156,15 +3156,9 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
        if (WARN_ON(!count))
                return -EINVAL;
 
+       /* We want to fill as much as possible, with no empty pages */
        pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
-       order = get_count_order(pages);
-
-       /*
-        * We want to fill as much as possible. No more than a page
-        * may be empty.
-        */
-       if (!is_power_of_2(pages))
-               order--;
+       order = fls(pages) - 1;
 
  again:
        pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
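fls(pages) - 1 is floor(log2(pages)), i.e. the largest order whose block does not exceed the request; the old get_count_order()-then-decrement sequence computed the same value less directly. A userspace demo of the rounding (fls() emulated with __builtin_clz(), which matches the kernel's fls() for nonzero input):

	#include <stdio.h>

	static int fls_emul(unsigned int x)	/* kernel fls(): 0 for 0 */
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	int main(void)
	{
		for (unsigned int pages = 1; pages <= 8; pages++) {
			int order = fls_emul(pages) - 1;
			printf("pages=%u -> order=%d (%u-page block)\n",
			       pages, order, 1u << order);
		}
		/* e.g. pages=5 -> order=2: round down to a 4-page block;
		 * the enclosing loop spills the rest into the next group */
		return 0;
	}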
@@ -3181,7 +3175,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
        ftrace_number_of_groups++;
 
        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
-       pg->size = cnt;
+       pg->order = order;
 
        if (cnt > count)
                cnt = count;
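With the order stored in the group, its capacity is always recomputable as (PAGE_SIZE << order) / ENTRY_SIZE, which is why the separate size field can go. A worked example under assumed sizes (4096-byte pages and a 16-byte dyn_ftrace, as on 64-bit x86 where the arch part is empty; both numbers are assumptions, not taken from the commit):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096, entry_size = 16; /* assumed */

		for (int order = 0; order <= 3; order++)
			printf("order=%d -> %lu bytes -> %lu records\n",
			       order, page_size << order,
			       (page_size << order) / entry_size);
		return 0;
	}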
@@ -3194,7 +3188,6 @@ ftrace_allocate_pages(unsigned long num_to_init)
 {
        struct ftrace_page *start_pg;
        struct ftrace_page *pg;
-       int order;
        int cnt;
 
        if (!num_to_init)
@@ -3230,13 +3223,13 @@ ftrace_allocate_pages(unsigned long num_to_init)
  free_pages:
        pg = start_pg;
        while (pg) {
-               order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               if (order >= 0)
-                       free_pages((unsigned long)pg->records, order);
+               if (pg->records) {
+                       free_pages((unsigned long)pg->records, pg->order);
+                       ftrace_number_of_pages -= 1 << pg->order;
+               }
                start_pg = pg->next;
                kfree(pg);
                pg = start_pg;
-               ftrace_number_of_pages -= 1 << order;
                ftrace_number_of_groups--;
        }
        pr_info("ftrace: FAILED to allocate memory for functions\n");
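The same release idiom now appears three times in this file (here, in ftrace_release_mod(), and in ftrace_free_mem()); a hypothetical shared helper, built only from calls visible in this diff, would read:

	static void ftrace_free_page_group(struct ftrace_page *pg)
	{
		if (pg->records) {	/* order only valid with records */
			free_pages((unsigned long)pg->records, pg->order);
			ftrace_number_of_pages -= 1 << pg->order;
		}
		ftrace_number_of_groups--;
		kfree(pg);
	}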
@@ -5407,7 +5400,7 @@ EXPORT_SYMBOL_GPL(modify_ftrace_direct);
  * @reset - non zero to reset all filters before applying this filter.
  *
  * Filters denote which functions should be enabled when tracing is enabled
- * If @ip is NULL, it failes to update filter.
+ * If @ip is NULL, it fails to update filter.
  */
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset)
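A hedged usage sketch for this API (my_ops, my_callback, and trace_one_function are illustrative names; the callback signature shown is the 5.13-era ftrace_func_t, which takes a struct ftrace_regs pointer):

	#include <linux/ftrace.h>

	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op,
				struct ftrace_regs *fregs)
	{
		/* runs on entry to the one filtered function */
	}

	static struct ftrace_ops my_ops = {
		.func = my_callback,
	};

	static int trace_one_function(unsigned long addr)
	{
		/* remove=0 adds addr; reset=1 clears previous filters */
		int ret = ftrace_set_filter_ip(&my_ops, addr, 0, 1);

		return ret ? ret : register_ftrace_function(&my_ops);
	}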
@@ -5631,7 +5624,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        parser = &iter->parser;
        if (trace_parser_loaded(parser)) {
-               ftrace_match_records(iter->hash, parser->buffer, parser->idx);
+               int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
+
+               ftrace_process_regex(iter, parser->buffer,
+                                    parser->idx, enable);
        }
 
        trace_parser_put(parser);
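The enable flag distinguishes the two tracefs files this release handler backs, and routing the leftover buffer through ftrace_process_regex() lets a command written without a trailing newline still execute on close. In comment form (file names are the standard tracefs ones; the command claim is paraphrased, not quoted from this commit):

	/*
	 * set_ftrace_filter  -> FTRACE_ITER_NOTRACE clear -> enable == 1
	 * set_ftrace_notrace -> FTRACE_ITER_NOTRACE set   -> enable == 0
	 *
	 * ftrace_process_regex() additionally recognizes "func:command"
	 * syntax that plain ftrace_match_records() would treat as a glob.
	 */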
@@ -6221,6 +6217,7 @@ static int ftrace_process_locs(struct module *mod,
        p = start;
        pg = start_pg;
        while (p < end) {
+               unsigned long end_offset;
                addr = ftrace_call_adjust(*p++);
                /*
                 * Some architecture linkers will pad between
@@ -6231,7 +6228,8 @@ static int ftrace_process_locs(struct module *mod,
                if (!addr)
                        continue;
 
-               if (pg->index == pg->size) {
+               end_offset = (pg->index+1) * sizeof(pg->records[0]);
+               if (end_offset > PAGE_SIZE << pg->order) {
                        /* We should have allocated enough */
                        if (WARN_ON(!pg->next))
                                break;
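The bounds check now works in bytes against the stored order. A small demo under the same assumed sizes as above (4096-byte pages, 16-byte records), for an order-1 group holding 512 records:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096, rec_size = 16; /* assumed */
		int order = 1;	/* 8192 bytes -> 512 records */

		for (int index = 510; index <= 512; index++) {
			unsigned long end_offset = (index + 1) * rec_size;
			printf("index=%d end_offset=%lu -> %s\n",
			       index, end_offset,
			       end_offset > (page_size << order) ?
			       "advance to pg->next" : "fits");
		}
		return 0;
	}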
@@ -6359,7 +6357,7 @@ clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
        }
 }
 
-/* Clear any records from hashs */
+/* Clear any records from hashes */
 static void clear_mod_from_hashes(struct ftrace_page *pg)
 {
        struct trace_array *tr;
@@ -6400,7 +6398,6 @@ void ftrace_release_mod(struct module *mod)
        struct ftrace_page **last_pg;
        struct ftrace_page *tmp_page = NULL;
        struct ftrace_page *pg;
-       int order;
 
        mutex_lock(&ftrace_lock);
 
@@ -6451,12 +6448,12 @@ void ftrace_release_mod(struct module *mod)
                /* Needs to be called outside of ftrace_lock */
                clear_mod_from_hashes(pg);
 
-               order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               if (order >= 0)
-                       free_pages((unsigned long)pg->records, order);
+               if (pg->records) {
+                       free_pages((unsigned long)pg->records, pg->order);
+                       ftrace_number_of_pages -= 1 << pg->order;
+               }
                tmp_page = pg->next;
                kfree(pg);
-               ftrace_number_of_pages -= 1 << order;
                ftrace_number_of_groups--;
        }
 }
@@ -6774,7 +6771,6 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
        struct ftrace_mod_map *mod_map = NULL;
        struct ftrace_init_func *func, *func_next;
        struct list_head clear_hash;
-       int order;
 
        INIT_LIST_HEAD(&clear_hash);
 
@@ -6812,10 +6808,10 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
                ftrace_update_tot_cnt--;
                if (!pg->index) {
                        *last_pg = pg->next;
-                       order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-                       if (order >= 0)
-                               free_pages((unsigned long)pg->records, order);
-                       ftrace_number_of_pages -= 1 << order;
+                       if (pg->records) {
+                               free_pages((unsigned long)pg->records, pg->order);
+                               ftrace_number_of_pages -= 1 << pg->order;
+                       }
                        ftrace_number_of_groups--;
                        kfree(pg);
                        pg = container_of(last_pg, struct ftrace_page, next);