tracing: Fix possible NULL pointer dereferences
[opensuse:kernel.git] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
32
33 #include <trace/events/sched.h>
34
35 #include <asm/ftrace.h>
36 #include <asm/setup.h>
37
38 #include "trace_output.h"
39 #include "trace_stat.h"
40
41 #define FTRACE_WARN_ON(cond)                    \
42         ({                                      \
43                 int ___r = cond;                \
44                 if (WARN_ON(___r))              \
45                         ftrace_kill();          \
46                 ___r;                           \
47         })
48
49 #define FTRACE_WARN_ON_ONCE(cond)               \
50         ({                                      \
51                 int ___r = cond;                \
52                 if (WARN_ON_ONCE(___r))         \
53                         ftrace_kill();          \
54                 ___r;                           \
55         })
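/*
 * Note: both macros are GNU statement expressions that evaluate to the
 * tested condition, so they can be used inline in a conditional. A minimal
 * usage sketch, mirroring the check in __register_ftrace_function() below:
 *
 *      if (FTRACE_WARN_ON(ops == &global_ops))
 *              return -EINVAL;
 *
 * On a warning they also call ftrace_kill() to shut ftrace down hard.
 */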
56
57 /* hash bits for specific function selection */
58 #define FTRACE_HASH_BITS 7
59 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
60 #define FTRACE_HASH_DEFAULT_BITS 10
61 #define FTRACE_HASH_MAX_BITS 12
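/*
 * For reference: FTRACE_HASH_BITS 7 gives a 128-bucket table for the
 * function-probe hash below, while the filter/notrace hashes use
 * FTRACE_HASH_DEFAULT_BITS (1 << 10 = 1024 buckets) by default and are
 * capped at 1 << 12 = 4096 buckets.
 */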
62
63 /* ftrace_enabled is a method to turn ftrace on or off */
64 int ftrace_enabled __read_mostly;
65 static int last_ftrace_enabled;
66
67 /* Quick disabling of function tracer. */
68 int function_trace_stop;
69
70 /* List for set_ftrace_pid's pids. */
71 LIST_HEAD(ftrace_pids);
72 struct ftrace_pid {
73         struct list_head list;
74         struct pid *pid;
75 };
76
77 /*
78  * ftrace_disabled is set when an anomaly is discovered.
79  * ftrace_disabled is much stronger than ftrace_enabled.
80  */
81 static int ftrace_disabled __read_mostly;
82
83 static DEFINE_MUTEX(ftrace_lock);
84
85 static struct ftrace_ops ftrace_list_end __read_mostly =
86 {
87         .func           = ftrace_stub,
88 };
89
90 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
91 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
92 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
93 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
94 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
95 static struct ftrace_ops global_ops;
96
97 static void
98 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
99
100 /*
101  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
102  * can use rcu_dereference_raw() is that elements removed from this list
103  * are simply leaked, so there is no need to interact with a grace-period
104  * mechanism.  The rcu_dereference_raw() calls are needed to handle
105  * concurrent insertions into the ftrace_global_list.
106  *
107  * Silly Alpha and silly pointer-speculation compiler optimizations!
108  */
109 static void ftrace_global_list_func(unsigned long ip,
110                                     unsigned long parent_ip)
111 {
112         struct ftrace_ops *op;
113
114         if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
115                 return;
116
117         trace_recursion_set(TRACE_GLOBAL_BIT);
118         op = rcu_dereference_raw(ftrace_global_list); /*see above*/
119         while (op != &ftrace_list_end) {
120                 op->func(ip, parent_ip);
121                 op = rcu_dereference_raw(op->next); /*see above*/
122         }
123         trace_recursion_clear(TRACE_GLOBAL_BIT);
124 }
125
126 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
127 {
128         if (!test_tsk_trace_trace(current))
129                 return;
130
131         ftrace_pid_function(ip, parent_ip);
132 }
133
134 static void set_ftrace_pid_function(ftrace_func_t func)
135 {
136         /* do not set ftrace_pid_function to itself! */
137         if (func != ftrace_pid_func)
138                 ftrace_pid_function = func;
139 }
140
141 /**
142  * clear_ftrace_function - reset the ftrace function
143  *
144  * This NULLs the ftrace function and in essence stops
145  * tracing. There may be a lag before tracing actually stops.
146  */
147 void clear_ftrace_function(void)
148 {
149         ftrace_trace_function = ftrace_stub;
150         __ftrace_trace_function = ftrace_stub;
151         ftrace_pid_function = ftrace_stub;
152 }
153
154 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
155 /*
156  * For those archs that do not test function_trace_stop in their
157  * mcount call site, we need to do it from C.
158  */
159 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
160 {
161         if (function_trace_stop)
162                 return;
163
164         __ftrace_trace_function(ip, parent_ip);
165 }
166 #endif
167
168 static void update_global_ops(void)
169 {
170         ftrace_func_t func;
171
172         /*
173          * If there's only one function registered, then call that
174          * function directly. Otherwise, we need to iterate over the
175          * registered callers.
176          */
177         if (ftrace_global_list == &ftrace_list_end ||
178             ftrace_global_list->next == &ftrace_list_end)
179                 func = ftrace_global_list->func;
180         else
181                 func = ftrace_global_list_func;
182
183         /* If we filter on pids, update to use the pid function */
184         if (!list_empty(&ftrace_pids)) {
185                 set_ftrace_pid_function(func);
186                 func = ftrace_pid_func;
187         }
188
189         global_ops.func = func;
190 }
191
192 static void update_ftrace_function(void)
193 {
194         ftrace_func_t func;
195
196         update_global_ops();
197
198         /*
199          * If we are at the end of the list and this ops is
200          * not dynamic, then have the mcount trampoline call
201          * the function directly
202          */
203         if (ftrace_ops_list == &ftrace_list_end ||
204             (ftrace_ops_list->next == &ftrace_list_end &&
205              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
206                 func = ftrace_ops_list->func;
207         else
208                 func = ftrace_ops_list_func;
209
210 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
211         ftrace_trace_function = func;
212 #else
213         __ftrace_trace_function = func;
214         ftrace_trace_function = ftrace_test_stop_func;
215 #endif
216 }
217
218 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
219 {
220         ops->next = *list;
221         /*
222          * We are entering ops into the list but another
223          * CPU might be walking that list. We need to make sure
224          * the ops->next pointer is valid before another CPU sees
225          * the ops pointer included into the list.
226          */
227         rcu_assign_pointer(*list, ops);
228 }
229
230 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
231 {
232         struct ftrace_ops **p;
233
234         /*
235          * If we are removing the last function, then simply point
236          * to the ftrace_stub.
237          */
238         if (*list == ops && ops->next == &ftrace_list_end) {
239                 *list = &ftrace_list_end;
240                 return 0;
241         }
242
243         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
244                 if (*p == ops)
245                         break;
246
247         if (*p != ops)
248                 return -1;
249
250         *p = (*p)->next;
251         return 0;
252 }
253
254 static int __register_ftrace_function(struct ftrace_ops *ops)
255 {
256         if (ftrace_disabled)
257                 return -ENODEV;
258
259         if (FTRACE_WARN_ON(ops == &global_ops))
260                 return -EINVAL;
261
262         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
263                 return -EBUSY;
264
265         if (!core_kernel_data((unsigned long)ops))
266                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
267
268         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
269                 int first = ftrace_global_list == &ftrace_list_end;
270                 add_ftrace_ops(&ftrace_global_list, ops);
271                 ops->flags |= FTRACE_OPS_FL_ENABLED;
272                 if (first)
273                         add_ftrace_ops(&ftrace_ops_list, &global_ops);
274         } else
275                 add_ftrace_ops(&ftrace_ops_list, ops);
276
277         if (ftrace_enabled)
278                 update_ftrace_function();
279
280         return 0;
281 }
282
283 static int __unregister_ftrace_function(struct ftrace_ops *ops)
284 {
285         int ret;
286
287         if (ftrace_disabled)
288                 return -ENODEV;
289
290         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
291                 return -EBUSY;
292
293         if (FTRACE_WARN_ON(ops == &global_ops))
294                 return -EINVAL;
295
296         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
297                 ret = remove_ftrace_ops(&ftrace_global_list, ops);
298                 if (!ret && ftrace_global_list == &ftrace_list_end)
299                         ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
300                 if (!ret)
301                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
302         } else
303                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
304
305         if (ret < 0)
306                 return ret;
307
308         if (ftrace_enabled)
309                 update_ftrace_function();
310
311         /*
312          * Dynamic ops may be freed, we must make sure that all
313          * callers are done before leaving this function.
314          */
315         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
316                 synchronize_sched();
317
318         return 0;
319 }
320
321 static void ftrace_update_pid_func(void)
322 {
323         /* Only do something if we are tracing something */
324         if (ftrace_trace_function == ftrace_stub)
325                 return;
326
327         update_ftrace_function();
328 }
329
330 #ifdef CONFIG_FUNCTION_PROFILER
331 struct ftrace_profile {
332         struct hlist_node               node;
333         unsigned long                   ip;
334         unsigned long                   counter;
335 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
336         unsigned long long              time;
337         unsigned long long              time_squared;
338 #endif
339 };
340
341 struct ftrace_profile_page {
342         struct ftrace_profile_page      *next;
343         unsigned long                   index;
344         struct ftrace_profile           records[];
345 };
346
347 struct ftrace_profile_stat {
348         atomic_t                        disabled;
349         struct hlist_head               *hash;
350         struct ftrace_profile_page      *pages;
351         struct ftrace_profile_page      *start;
352         struct tracer_stat              stat;
353 };
354
355 #define PROFILE_RECORDS_SIZE                                            \
356         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
357
358 #define PROFILES_PER_PAGE                                       \
359         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
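/*
 * Rough sizing sketch (illustrative only, assuming a 64-bit kernel with
 * 4 KiB pages): the page header is two words (~16 bytes) and each record
 * is ~48 bytes with CONFIG_FUNCTION_GRAPH_TRACER (hlist_node + ip +
 * counter + time + time_squared), so PROFILES_PER_PAGE works out to
 * roughly (4096 - 16) / 48 ~= 85 records per page. Without the graph
 * fields a record is ~32 bytes, giving ~127 records per page. Exact
 * numbers depend on the architecture and config.
 */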
360
361 static int ftrace_profile_bits __read_mostly;
362 static int ftrace_profile_enabled __read_mostly;
363
364 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
365 static DEFINE_MUTEX(ftrace_profile_lock);
366
367 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
368
369 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
370
371 static void *
372 function_stat_next(void *v, int idx)
373 {
374         struct ftrace_profile *rec = v;
375         struct ftrace_profile_page *pg;
376
377         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
378
379  again:
380         if (idx != 0)
381                 rec++;
382
383         if ((void *)rec >= (void *)&pg->records[pg->index]) {
384                 pg = pg->next;
385                 if (!pg)
386                         return NULL;
387                 rec = &pg->records[0];
388                 if (!rec->counter)
389                         goto again;
390         }
391
392         return rec;
393 }
394
395 static void *function_stat_start(struct tracer_stat *trace)
396 {
397         struct ftrace_profile_stat *stat =
398                 container_of(trace, struct ftrace_profile_stat, stat);
399
400         if (!stat || !stat->start)
401                 return NULL;
402
403         return function_stat_next(&stat->start->records[0], 0);
404 }
405
406 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
407 /* function graph: compare on total time */
408 static int function_stat_cmp(void *p1, void *p2)
409 {
410         struct ftrace_profile *a = p1;
411         struct ftrace_profile *b = p2;
412
413         if (a->time < b->time)
414                 return -1;
415         if (a->time > b->time)
416                 return 1;
417         else
418                 return 0;
419 }
420 #else
421 /* not function graph: compare against hits */
422 static int function_stat_cmp(void *p1, void *p2)
423 {
424         struct ftrace_profile *a = p1;
425         struct ftrace_profile *b = p2;
426
427         if (a->counter < b->counter)
428                 return -1;
429         if (a->counter > b->counter)
430                 return 1;
431         else
432                 return 0;
433 }
434 #endif
435
436 static int function_stat_headers(struct seq_file *m)
437 {
438 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
439         seq_printf(m, "  Function                               "
440                    "Hit    Time            Avg             s^2\n"
441                       "  --------                               "
442                    "---    ----            ---             ---\n");
443 #else
444         seq_printf(m, "  Function                               Hit\n"
445                       "  --------                               ---\n");
446 #endif
447         return 0;
448 }
449
450 static int function_stat_show(struct seq_file *m, void *v)
451 {
452         struct ftrace_profile *rec = v;
453         char str[KSYM_SYMBOL_LEN];
454         int ret = 0;
455 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
456         static struct trace_seq s;
457         unsigned long long avg;
458         unsigned long long stddev;
459 #endif
460         mutex_lock(&ftrace_profile_lock);
461
462         /* we raced with function_profile_reset() */
463         if (unlikely(rec->counter == 0)) {
464                 ret = -EBUSY;
465                 goto out;
466         }
467
468         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
469         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
470
471 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
472         seq_printf(m, "    ");
473         avg = rec->time;
474         do_div(avg, rec->counter);
475
476         /* Sample variance (s^2) */
477         if (rec->counter <= 1)
478                 stddev = 0;
479         else {
480                 stddev = rec->time_squared - rec->counter * avg * avg;
481                 /*
482                  * Divide by only 1000 for the ns^2 -> us^2 conversion.
483                  * trace_print_graph_duration will divide by 1000 again.
484                  */
485                 do_div(stddev, (rec->counter - 1) * 1000);
486         }
487
488         trace_seq_init(&s);
489         trace_print_graph_duration(rec->time, &s);
490         trace_seq_puts(&s, "    ");
491         trace_print_graph_duration(avg, &s);
492         trace_seq_puts(&s, "    ");
493         trace_print_graph_duration(stddev, &s);
494         trace_print_seq(m, &s);
495 #endif
496         seq_putc(m, '\n');
497 out:
498         mutex_unlock(&ftrace_profile_lock);
499
500         return ret;
501 }
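/*
 * Worked example of the s^2 scaling above (illustrative numbers): with two
 * hits of 1000 ns and 3000 ns, time = 4000, avg = 2000 and time_squared =
 * 1e7, so stddev = 1e7 - 2 * 2000 * 2000 = 2e6 ns^2. Dividing by
 * (counter - 1) * 1000 yields 2000, and trace_print_graph_duration()'s own
 * divide-by-1000 prints 2, i.e. 2 us^2 -- the same value as the textbook
 * sample variance of those two samples.
 */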
502
503 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
504 {
505         struct ftrace_profile_page *pg;
506
507         pg = stat->pages = stat->start;
508
509         while (pg) {
510                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
511                 pg->index = 0;
512                 pg = pg->next;
513         }
514
515         memset(stat->hash, 0,
516                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
517 }
518
519 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
520 {
521         struct ftrace_profile_page *pg;
522         int functions;
523         int pages;
524         int i;
525
526         /* If we already allocated, do nothing */
527         if (stat->pages)
528                 return 0;
529
530         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
531         if (!stat->pages)
532                 return -ENOMEM;
533
534 #ifdef CONFIG_DYNAMIC_FTRACE
535         functions = ftrace_update_tot_cnt;
536 #else
537         /*
538          * We do not know the number of functions that exist because
539          * dynamic tracing is what counts them. With past experience
540          * we have around 20K functions. That should be more than enough.
541          * It is highly unlikely we will execute every function in
542          * the kernel.
543          */
544         functions = 20000;
545 #endif
546
547         pg = stat->start = stat->pages;
548
549         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
550
551         for (i = 1; i < pages; i++) {
552                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
553                 if (!pg->next)
554                         goto out_free;
555                 pg = pg->next;
556         }
557
558         return 0;
559
560  out_free:
561         pg = stat->start;
562         while (pg) {
563                 unsigned long tmp = (unsigned long)pg;
564
565                 pg = pg->next;
566                 free_page(tmp);
567         }
568
569         stat->pages = NULL;
570         stat->start = NULL;
571
572         return -ENOMEM;
573 }
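/*
 * Back-of-the-envelope cost (illustrative, 64-bit, 4 KiB pages, graph
 * tracer enabled): with ~20000 functions and ~85 records per page this
 * allocates DIV_ROUND_UP(20000, 85) ~= 236 pages, i.e. a bit under 1 MiB
 * of profile records per CPU. The real figure scales with
 * ftrace_update_tot_cnt when CONFIG_DYNAMIC_FTRACE is enabled.
 */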
574
575 static int ftrace_profile_init_cpu(int cpu)
576 {
577         struct ftrace_profile_stat *stat;
578         int size;
579
580         stat = &per_cpu(ftrace_profile_stats, cpu);
581
582         if (stat->hash) {
583                 /* If the profile is already created, simply reset it */
584                 ftrace_profile_reset(stat);
585                 return 0;
586         }
587
588         /*
589          * We are profiling all functions, but usually only a few thousand
590          * functions are hit. We'll make a hash of 1024 items.
591          */
592         size = FTRACE_PROFILE_HASH_SIZE;
593
594         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
595
596         if (!stat->hash)
597                 return -ENOMEM;
598
599         if (!ftrace_profile_bits) {
600                 size--;
601
602                 for (; size; size >>= 1)
603                         ftrace_profile_bits++;
604         }
605
606         /* Preallocate the function profiling pages */
607         if (ftrace_profile_pages_init(stat) < 0) {
608                 kfree(stat->hash);
609                 stat->hash = NULL;
610                 return -ENOMEM;
611         }
612
613         return 0;
614 }
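/*
 * The loop above computes log2 of the hash size: with
 * FTRACE_PROFILE_HASH_SIZE = 1024, size-- yields 1023 and the loop runs
 * ten times, so ftrace_profile_bits ends up as 10 -- the shift width
 * hash_long() needs for a 1024-bucket table.
 */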
615
616 static int ftrace_profile_init(void)
617 {
618         int cpu;
619         int ret = 0;
620
621         for_each_online_cpu(cpu) {
622                 ret = ftrace_profile_init_cpu(cpu);
623                 if (ret)
624                         break;
625         }
626
627         return ret;
628 }
629
630 /* interrupts must be disabled */
631 static struct ftrace_profile *
632 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
633 {
634         struct ftrace_profile *rec;
635         struct hlist_head *hhd;
636         struct hlist_node *n;
637         unsigned long key;
638
639         key = hash_long(ip, ftrace_profile_bits);
640         hhd = &stat->hash[key];
641
642         if (hlist_empty(hhd))
643                 return NULL;
644
645         hlist_for_each_entry_rcu(rec, n, hhd, node) {
646                 if (rec->ip == ip)
647                         return rec;
648         }
649
650         return NULL;
651 }
652
653 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
654                                struct ftrace_profile *rec)
655 {
656         unsigned long key;
657
658         key = hash_long(rec->ip, ftrace_profile_bits);
659         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
660 }
661
662 /*
663  * The memory is already allocated; this simply finds a new record to use.
664  */
665 static struct ftrace_profile *
666 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
667 {
668         struct ftrace_profile *rec = NULL;
669
670         /* prevent recursion (from NMIs) */
671         if (atomic_inc_return(&stat->disabled) != 1)
672                 goto out;
673
674         /*
675          * Try to find the function again since an NMI
676          * could have added it
677          */
678         rec = ftrace_find_profiled_func(stat, ip);
679         if (rec)
680                 goto out;
681
682         if (stat->pages->index == PROFILES_PER_PAGE) {
683                 if (!stat->pages->next)
684                         goto out;
685                 stat->pages = stat->pages->next;
686         }
687
688         rec = &stat->pages->records[stat->pages->index++];
689         rec->ip = ip;
690         ftrace_add_profile(stat, rec);
691
692  out:
693         atomic_dec(&stat->disabled);
694
695         return rec;
696 }
697
698 static void
699 function_profile_call(unsigned long ip, unsigned long parent_ip)
700 {
701         struct ftrace_profile_stat *stat;
702         struct ftrace_profile *rec;
703         unsigned long flags;
704
705         if (!ftrace_profile_enabled)
706                 return;
707
708         local_irq_save(flags);
709
710         stat = &__get_cpu_var(ftrace_profile_stats);
711         if (!stat->hash || !ftrace_profile_enabled)
712                 goto out;
713
714         rec = ftrace_find_profiled_func(stat, ip);
715         if (!rec) {
716                 rec = ftrace_profile_alloc(stat, ip);
717                 if (!rec)
718                         goto out;
719         }
720
721         rec->counter++;
722  out:
723         local_irq_restore(flags);
724 }
725
726 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
727 static int profile_graph_entry(struct ftrace_graph_ent *trace)
728 {
729         function_profile_call(trace->func, 0);
730         return 1;
731 }
732
733 static void profile_graph_return(struct ftrace_graph_ret *trace)
734 {
735         struct ftrace_profile_stat *stat;
736         unsigned long long calltime;
737         struct ftrace_profile *rec;
738         unsigned long flags;
739
740         local_irq_save(flags);
741         stat = &__get_cpu_var(ftrace_profile_stats);
742         if (!stat->hash || !ftrace_profile_enabled)
743                 goto out;
744
745         /* If the calltime was zero'd ignore it */
746         if (!trace->calltime)
747                 goto out;
748
749         calltime = trace->rettime - trace->calltime;
750
751         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
752                 int index;
753
754                 index = trace->depth;
755
756                 /* Append this call time to the parent time to subtract */
757                 if (index)
758                         current->ret_stack[index - 1].subtime += calltime;
759
760                 if (current->ret_stack[index].subtime < calltime)
761                         calltime -= current->ret_stack[index].subtime;
762                 else
763                         calltime = 0;
764         }
765
766         rec = ftrace_find_profiled_func(stat, trace->func);
767         if (rec) {
768                 rec->time += calltime;
769                 rec->time_squared += calltime * calltime;
770         }
771
772  out:
773         local_irq_restore(flags);
774 }
775
776 static int register_ftrace_profiler(void)
777 {
778         return register_ftrace_graph(&profile_graph_return,
779                                      &profile_graph_entry);
780 }
781
782 static void unregister_ftrace_profiler(void)
783 {
784         unregister_ftrace_graph();
785 }
786 #else
787 static struct ftrace_ops ftrace_profile_ops __read_mostly =
788 {
789         .func           = function_profile_call,
790 };
791
792 static int register_ftrace_profiler(void)
793 {
794         return register_ftrace_function(&ftrace_profile_ops);
795 }
796
797 static void unregister_ftrace_profiler(void)
798 {
799         unregister_ftrace_function(&ftrace_profile_ops);
800 }
801 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
802
803 static ssize_t
804 ftrace_profile_write(struct file *filp, const char __user *ubuf,
805                      size_t cnt, loff_t *ppos)
806 {
807         unsigned long val;
808         char buf[64];           /* big enough to hold a number */
809         int ret;
810
811         if (cnt >= sizeof(buf))
812                 return -EINVAL;
813
814         if (copy_from_user(&buf, ubuf, cnt))
815                 return -EFAULT;
816
817         buf[cnt] = 0;
818
819         ret = strict_strtoul(buf, 10, &val);
820         if (ret < 0)
821                 return ret;
822
823         val = !!val;
824
825         mutex_lock(&ftrace_profile_lock);
826         if (ftrace_profile_enabled ^ val) {
827                 if (val) {
828                         ret = ftrace_profile_init();
829                         if (ret < 0) {
830                                 cnt = ret;
831                                 goto out;
832                         }
833
834                         ret = register_ftrace_profiler();
835                         if (ret < 0) {
836                                 cnt = ret;
837                                 goto out;
838                         }
839                         ftrace_profile_enabled = 1;
840                 } else {
841                         ftrace_profile_enabled = 0;
842                         /*
843                          * unregister_ftrace_profiler calls stop_machine
844                          * so this acts like a synchronize_sched().
845                          */
846                         unregister_ftrace_profiler();
847                 }
848         }
849  out:
850         mutex_unlock(&ftrace_profile_lock);
851
852         *ppos += cnt;
853
854         return cnt;
855 }
856
857 static ssize_t
858 ftrace_profile_read(struct file *filp, char __user *ubuf,
859                      size_t cnt, loff_t *ppos)
860 {
861         char buf[64];           /* big enough to hold a number */
862         int r;
863
864         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
865         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
866 }
867
868 static const struct file_operations ftrace_profile_fops = {
869         .open           = tracing_open_generic,
870         .read           = ftrace_profile_read,
871         .write          = ftrace_profile_write,
872         .llseek         = default_llseek,
873 };
874
875 /* used to initialize the real stat files */
876 static struct tracer_stat function_stats __initdata = {
877         .name           = "functions",
878         .stat_start     = function_stat_start,
879         .stat_next      = function_stat_next,
880         .stat_cmp       = function_stat_cmp,
881         .stat_headers   = function_stat_headers,
882         .stat_show      = function_stat_show
883 };
884
885 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
886 {
887         struct ftrace_profile_stat *stat;
888         struct dentry *entry;
889         char *name;
890         int ret;
891         int cpu;
892
893         for_each_possible_cpu(cpu) {
894                 stat = &per_cpu(ftrace_profile_stats, cpu);
895
896                 /* allocate enough for function name + cpu number */
897                 name = kmalloc(32, GFP_KERNEL);
898                 if (!name) {
899                         /*
900                          * The files created are permanent; if something goes wrong
901                          * here we still do not free the memory.
902                          */
903                         WARN(1,
904                              "Could not allocate stat file for cpu %d\n",
905                              cpu);
906                         return;
907                 }
908                 stat->stat = function_stats;
909                 snprintf(name, 32, "function%d", cpu);
910                 stat->stat.name = name;
911                 ret = register_stat_tracer(&stat->stat);
912                 if (ret) {
913                         WARN(1,
914                              "Could not register function stat for cpu %d\n",
915                              cpu);
916                         kfree(name);
917                         return;
918                 }
919         }
920
921         entry = debugfs_create_file("function_profile_enabled", 0644,
922                                     d_tracer, NULL, &ftrace_profile_fops);
923         if (!entry)
924                 pr_warning("Could not create debugfs "
925                            "'function_profile_enabled' entry\n");
926 }
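/*
 * Typical usage from user space (illustrative; paths assume debugfs is
 * mounted at /sys/kernel/debug):
 *
 *      echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *      ... run the workload ...
 *      cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * The per-CPU "functionN" stat files come from the register_stat_tracer()
 * calls above, one per possible CPU.
 */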
927
928 #else /* CONFIG_FUNCTION_PROFILER */
929 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
930 {
931 }
932 #endif /* CONFIG_FUNCTION_PROFILER */
933
934 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
935
936 #ifdef CONFIG_DYNAMIC_FTRACE
937
938 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
939 # error Dynamic ftrace depends on MCOUNT_RECORD
940 #endif
941
942 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
943
944 struct ftrace_func_probe {
945         struct hlist_node       node;
946         struct ftrace_probe_ops *ops;
947         unsigned long           flags;
948         unsigned long           ip;
949         void                    *data;
950         struct rcu_head         rcu;
951 };
952
953 enum {
954         FTRACE_UPDATE_CALLS             = (1 << 0),
955         FTRACE_DISABLE_CALLS            = (1 << 1),
956         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
957         FTRACE_START_FUNC_RET           = (1 << 3),
958         FTRACE_STOP_FUNC_RET            = (1 << 4),
959 };
960 struct ftrace_func_entry {
961         struct hlist_node hlist;
962         unsigned long ip;
963 };
964
965 struct ftrace_hash {
966         unsigned long           size_bits;
967         struct hlist_head       *buckets;
968         unsigned long           count;
969         struct rcu_head         rcu;
970 };
971
972 /*
973  * We make these constant because no one should touch them,
974  * but they are used as the default "empty hash", to avoid allocating
975  * it all the time. They live in a read-only section such that if
976  * anyone does try to modify them, it will cause an exception.
977  */
978 static const struct hlist_head empty_buckets[1];
979 static const struct ftrace_hash empty_hash = {
980         .buckets = (struct hlist_head *)empty_buckets,
981 };
982 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
983
984 static struct ftrace_ops global_ops = {
985         .func                   = ftrace_stub,
986         .notrace_hash           = EMPTY_HASH,
987         .filter_hash            = EMPTY_HASH,
988 };
989
990 static struct dyn_ftrace *ftrace_new_addrs;
991
992 static DEFINE_MUTEX(ftrace_regex_lock);
993
994 struct ftrace_page {
995         struct ftrace_page      *next;
996         int                     index;
997         struct dyn_ftrace       records[];
998 };
999
1000 #define ENTRIES_PER_PAGE \
1001   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
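/*
 * Same sizing idea as PROFILES_PER_PAGE above: a small struct ftrace_page
 * header, with the rest of the page packed with struct dyn_ftrace records.
 * The exact count per page depends on sizeof(struct dyn_ftrace), which
 * varies with architecture and config, but is typically on the order of a
 * hundred or more records per page on 64-bit systems.
 */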
1002
1003 /* estimate from running different kernels */
1004 #define NR_TO_INIT              10000
1005
1006 static struct ftrace_page       *ftrace_pages_start;
1007 static struct ftrace_page       *ftrace_pages;
1008
1009 static struct dyn_ftrace *ftrace_free_records;
1010
1011 static struct ftrace_func_entry *
1012 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1013 {
1014         unsigned long key;
1015         struct ftrace_func_entry *entry;
1016         struct hlist_head *hhd;
1017         struct hlist_node *n;
1018
1019         if (!hash->count)
1020                 return NULL;
1021
1022         if (hash->size_bits > 0)
1023                 key = hash_long(ip, hash->size_bits);
1024         else
1025                 key = 0;
1026
1027         hhd = &hash->buckets[key];
1028
1029         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1030                 if (entry->ip == ip)
1031                         return entry;
1032         }
1033         return NULL;
1034 }
1035
1036 static void __add_hash_entry(struct ftrace_hash *hash,
1037                              struct ftrace_func_entry *entry)
1038 {
1039         struct hlist_head *hhd;
1040         unsigned long key;
1041
1042         if (hash->size_bits)
1043                 key = hash_long(entry->ip, hash->size_bits);
1044         else
1045                 key = 0;
1046
1047         hhd = &hash->buckets[key];
1048         hlist_add_head(&entry->hlist, hhd);
1049         hash->count++;
1050 }
1051
1052 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1053 {
1054         struct ftrace_func_entry *entry;
1055
1056         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1057         if (!entry)
1058                 return -ENOMEM;
1059
1060         entry->ip = ip;
1061         __add_hash_entry(hash, entry);
1062
1063         return 0;
1064 }
1065
1066 static void
1067 free_hash_entry(struct ftrace_hash *hash,
1068                   struct ftrace_func_entry *entry)
1069 {
1070         hlist_del(&entry->hlist);
1071         kfree(entry);
1072         hash->count--;
1073 }
1074
1075 static void
1076 remove_hash_entry(struct ftrace_hash *hash,
1077                   struct ftrace_func_entry *entry)
1078 {
1079         hlist_del(&entry->hlist);
1080         hash->count--;
1081 }
1082
1083 static void ftrace_hash_clear(struct ftrace_hash *hash)
1084 {
1085         struct hlist_head *hhd;
1086         struct hlist_node *tp, *tn;
1087         struct ftrace_func_entry *entry;
1088         int size = 1 << hash->size_bits;
1089         int i;
1090
1091         if (!hash->count)
1092                 return;
1093
1094         for (i = 0; i < size; i++) {
1095                 hhd = &hash->buckets[i];
1096                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1097                         free_hash_entry(hash, entry);
1098         }
1099         FTRACE_WARN_ON(hash->count);
1100 }
1101
1102 static void free_ftrace_hash(struct ftrace_hash *hash)
1103 {
1104         if (!hash || hash == EMPTY_HASH)
1105                 return;
1106         ftrace_hash_clear(hash);
1107         kfree(hash->buckets);
1108         kfree(hash);
1109 }
1110
1111 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1112 {
1113         struct ftrace_hash *hash;
1114
1115         hash = container_of(rcu, struct ftrace_hash, rcu);
1116         free_ftrace_hash(hash);
1117 }
1118
1119 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1120 {
1121         if (!hash || hash == EMPTY_HASH)
1122                 return;
1123         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1124 }
1125
1126 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1127 {
1128         struct ftrace_hash *hash;
1129         int size;
1130
1131         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1132         if (!hash)
1133                 return NULL;
1134
1135         size = 1 << size_bits;
1136         hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1137
1138         if (!hash->buckets) {
1139                 kfree(hash);
1140                 return NULL;
1141         }
1142
1143         hash->size_bits = size_bits;
1144
1145         return hash;
1146 }
1147
1148 static struct ftrace_hash *
1149 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1150 {
1151         struct ftrace_func_entry *entry;
1152         struct ftrace_hash *new_hash;
1153         struct hlist_node *tp;
1154         int size;
1155         int ret;
1156         int i;
1157
1158         new_hash = alloc_ftrace_hash(size_bits);
1159         if (!new_hash)
1160                 return NULL;
1161
1162         /* Empty hash? */
1163         if (!hash || !hash->count)
1164                 return new_hash;
1165
1166         size = 1 << hash->size_bits;
1167         for (i = 0; i < size; i++) {
1168                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1169                         ret = add_hash_entry(new_hash, entry->ip);
1170                         if (ret < 0)
1171                                 goto free_hash;
1172                 }
1173         }
1174
1175         FTRACE_WARN_ON(new_hash->count != hash->count);
1176
1177         return new_hash;
1178
1179  free_hash:
1180         free_ftrace_hash(new_hash);
1181         return NULL;
1182 }
1183
1184 static void
1185 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1186 static void
1187 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1188
1189 static int
1190 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1191                  struct ftrace_hash **dst, struct ftrace_hash *src)
1192 {
1193         struct ftrace_func_entry *entry;
1194         struct hlist_node *tp, *tn;
1195         struct hlist_head *hhd;
1196         struct ftrace_hash *old_hash;
1197         struct ftrace_hash *new_hash;
1198         unsigned long key;
1199         int size = src->count;
1200         int bits = 0;
1201         int ret;
1202         int i;
1203
1204         /*
1205          * Remove the current set, update the hash and add
1206          * them back.
1207          */
1208         ftrace_hash_rec_disable(ops, enable);
1209
1210         /*
1211          * If the new source is empty, just free dst and assign it
1212          * the empty_hash.
1213          */
1214         if (!src->count) {
1215                 free_ftrace_hash_rcu(*dst);
1216                 rcu_assign_pointer(*dst, EMPTY_HASH);
1217                 return 0;
1218         }
1219
1220         /*
1221          * Make the hash size about 1/2 the # found
1222          */
1223         for (size /= 2; size; size >>= 1)
1224                 bits++;
1225
1226         /* Don't allocate too much */
1227         if (bits > FTRACE_HASH_MAX_BITS)
1228                 bits = FTRACE_HASH_MAX_BITS;
1229
1230         ret = -ENOMEM;
1231         new_hash = alloc_ftrace_hash(bits);
1232         if (!new_hash)
1233                 goto out;
1234
1235         size = 1 << src->size_bits;
1236         for (i = 0; i < size; i++) {
1237                 hhd = &src->buckets[i];
1238                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1239                         if (bits > 0)
1240                                 key = hash_long(entry->ip, bits);
1241                         else
1242                                 key = 0;
1243                         remove_hash_entry(src, entry);
1244                         __add_hash_entry(new_hash, entry);
1245                 }
1246         }
1247
1248         old_hash = *dst;
1249         rcu_assign_pointer(*dst, new_hash);
1250         free_ftrace_hash_rcu(old_hash);
1251
1252         ret = 0;
1253  out:
1254         /*
1255          * Enable regardless of ret:
1256          *  On success, we enable the new hash.
1257          *  On failure, we re-enable the original hash.
1258          */
1259         ftrace_hash_rec_enable(ops, enable);
1260
1261         return ret;
1262 }
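/*
 * Illustrative example of the sizing logic above: if src holds 100
 * entries, the loop starts from size / 2 = 50 and counts 6 shifts, so the
 * new hash gets 1 << 6 = 64 buckets -- roughly half the entry count,
 * rounded to a power of two, and never more than
 * 1 << FTRACE_HASH_MAX_BITS buckets.
 */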
1263
1264 /*
1265  * Test the hashes for this ops to see if we want to call
1266  * the ops->func or not.
1267  *
1268  * It's a match if the ip is in the ops->filter_hash or
1269  * the filter_hash does not exist or is empty,
1270  *  AND
1271  * the ip is not in the ops->notrace_hash.
1272  *
1273  * This needs to be called with preemption disabled as
1274  * the hashes are freed with call_rcu_sched().
1275  */
1276 static int
1277 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1278 {
1279         struct ftrace_hash *filter_hash;
1280         struct ftrace_hash *notrace_hash;
1281         int ret;
1282
1283         filter_hash = rcu_dereference_raw(ops->filter_hash);
1284         notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1285
1286         if ((!filter_hash || !filter_hash->count ||
1287              ftrace_lookup_ip(filter_hash, ip)) &&
1288             (!notrace_hash || !notrace_hash->count ||
1289              !ftrace_lookup_ip(notrace_hash, ip)))
1290                 ret = 1;
1291         else
1292                 ret = 0;
1293
1294         return ret;
1295 }
1296
1297 /*
1298  * This is a double for-loop. Do not use 'break' to break out of the loop;
1299  * you must use a goto (see the usage sketch after these macros).
1300  */
1301 #define do_for_each_ftrace_rec(pg, rec)                                 \
1302         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1303                 int _____i;                                             \
1304                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1305                         rec = &pg->records[_____i];
1306
1307 #define while_for_each_ftrace_rec()             \
1308                 }                               \
1309         }
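/*
 * Usage sketch for the iterator above (hypothetical snippet; compare
 * ftrace_text_reserved() below, which simply returns from inside the
 * loop). Note the goto: a plain 'break' would only leave the inner loop.
 *
 *      struct ftrace_page *pg;
 *      struct dyn_ftrace *rec;
 *
 *      do_for_each_ftrace_rec(pg, rec) {
 *              if (rec->ip == target_ip)
 *                      goto found;
 *      } while_for_each_ftrace_rec();
 *      return;
 *  found:
 *      ...
 */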
1310
1311 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1312                                      int filter_hash,
1313                                      bool inc)
1314 {
1315         struct ftrace_hash *hash;
1316         struct ftrace_hash *other_hash;
1317         struct ftrace_page *pg;
1318         struct dyn_ftrace *rec;
1319         int count = 0;
1320         int all = 0;
1321
1322         /* Only update if the ops has been registered */
1323         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1324                 return;
1325
1326         /*
1327          * In the filter_hash case:
1328          *   If the count is zero, we update all records.
1329          *   Otherwise we just update the items in the hash.
1330          *
1331          * In the notrace_hash case:
1332          *   We enable the update in the hash.
1333          *   As disabling notrace means enabling the tracing,
1334          *   and enabling notrace means disabling, the inc variable
1335          *   gets inverted.
1336          */
1337         if (filter_hash) {
1338                 hash = ops->filter_hash;
1339                 other_hash = ops->notrace_hash;
1340                 if (!hash || !hash->count)
1341                         all = 1;
1342         } else {
1343                 inc = !inc;
1344                 hash = ops->notrace_hash;
1345                 other_hash = ops->filter_hash;
1346                 /*
1347                  * If the notrace hash has no items,
1348                  * then there's nothing to do.
1349                  */
1350                 if (hash && !hash->count)
1351                         return;
1352         }
1353
1354         do_for_each_ftrace_rec(pg, rec) {
1355                 int in_other_hash = 0;
1356                 int in_hash = 0;
1357                 int match = 0;
1358
1359                 if (all) {
1360                         /*
1361                          * Only the filter_hash affects all records.
1362                          * Update if the record is not in the notrace hash.
1363                          */
1364                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1365                                 match = 1;
1366                 } else {
1367                         in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
1368                         in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
1369
1370                         /*
1371                          * filter_hash: match ips in this hash but not in notrace; notrace: match ips that are also traced.
1372                          */
1373                         if (filter_hash && in_hash && !in_other_hash)
1374                                 match = 1;
1375                         else if (!filter_hash && in_hash &&
1376                                  (in_other_hash || !other_hash->count))
1377                                 match = 1;
1378                 }
1379                 if (!match)
1380                         continue;
1381
1382                 if (inc) {
1383                         rec->flags++;
1384                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1385                                 return;
1386                 } else {
1387                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1388                                 return;
1389                         rec->flags--;
1390                 }
1391                 count++;
1392                 /* Shortcut: if we handled all records, we are done. */
1393                 if (!all && count == hash->count)
1394                         return;
1395         } while_for_each_ftrace_rec();
1396 }
1397
1398 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1399                                     int filter_hash)
1400 {
1401         __ftrace_hash_rec_update(ops, filter_hash, 0);
1402 }
1403
1404 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1405                                    int filter_hash)
1406 {
1407         __ftrace_hash_rec_update(ops, filter_hash, 1);
1408 }
1409
1410 static void ftrace_free_rec(struct dyn_ftrace *rec)
1411 {
1412         rec->freelist = ftrace_free_records;
1413         ftrace_free_records = rec;
1414         rec->flags |= FTRACE_FL_FREE;
1415 }
1416
1417 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1418 {
1419         struct dyn_ftrace *rec;
1420
1421         /* First check for freed records */
1422         if (ftrace_free_records) {
1423                 rec = ftrace_free_records;
1424
1425                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1426                         FTRACE_WARN_ON_ONCE(1);
1427                         ftrace_free_records = NULL;
1428                         return NULL;
1429                 }
1430
1431                 ftrace_free_records = rec->freelist;
1432                 memset(rec, 0, sizeof(*rec));
1433                 return rec;
1434         }
1435
1436         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1437                 if (!ftrace_pages->next) {
1438                         /* allocate another page */
1439                         ftrace_pages->next =
1440                                 (void *)get_zeroed_page(GFP_KERNEL);
1441                         if (!ftrace_pages->next)
1442                                 return NULL;
1443                 }
1444                 ftrace_pages = ftrace_pages->next;
1445         }
1446
1447         return &ftrace_pages->records[ftrace_pages->index++];
1448 }
1449
1450 static struct dyn_ftrace *
1451 ftrace_record_ip(unsigned long ip)
1452 {
1453         struct dyn_ftrace *rec;
1454
1455         if (ftrace_disabled)
1456                 return NULL;
1457
1458         rec = ftrace_alloc_dyn_node(ip);
1459         if (!rec)
1460                 return NULL;
1461
1462         rec->ip = ip;
1463         rec->newlist = ftrace_new_addrs;
1464         ftrace_new_addrs = rec;
1465
1466         return rec;
1467 }
1468
1469 static void print_ip_ins(const char *fmt, unsigned char *p)
1470 {
1471         int i;
1472
1473         printk(KERN_CONT "%s", fmt);
1474
1475         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1476                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1477 }
1478
1479 static void ftrace_bug(int failed, unsigned long ip)
1480 {
1481         switch (failed) {
1482         case -EFAULT:
1483                 FTRACE_WARN_ON_ONCE(1);
1484                 pr_info("ftrace faulted on modifying ");
1485                 print_ip_sym(ip);
1486                 break;
1487         case -EINVAL:
1488                 FTRACE_WARN_ON_ONCE(1);
1489                 pr_info("ftrace failed to modify ");
1490                 print_ip_sym(ip);
1491                 print_ip_ins(" actual: ", (unsigned char *)ip);
1492                 printk(KERN_CONT "\n");
1493                 break;
1494         case -EPERM:
1495                 FTRACE_WARN_ON_ONCE(1);
1496                 pr_info("ftrace faulted on writing ");
1497                 print_ip_sym(ip);
1498                 break;
1499         default:
1500                 FTRACE_WARN_ON_ONCE(1);
1501                 pr_info("ftrace faulted on unknown error ");
1502                 print_ip_sym(ip);
1503         }
1504 }
1505
1506
1507 /* Return 1 if the address range is reserved for ftrace */
1508 int ftrace_text_reserved(void *start, void *end)
1509 {
1510         struct dyn_ftrace *rec;
1511         struct ftrace_page *pg;
1512
1513         do_for_each_ftrace_rec(pg, rec) {
1514                 if (rec->ip <= (unsigned long)end &&
1515                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1516                         return 1;
1517         } while_for_each_ftrace_rec();
1518         return 0;
1519 }
1520
1521
1522 static int
1523 __ftrace_replace_code(struct dyn_ftrace *rec, int update)
1524 {
1525         unsigned long ftrace_addr;
1526         unsigned long flag = 0UL;
1527
1528         ftrace_addr = (unsigned long)FTRACE_ADDR;
1529
1530         /*
1531          * If we are updating calls:
1532          *
1533          *   If the record has a ref count, then we need to enable it
1534          *   because someone is using it.
1535          *
1536          *   Otherwise we make sure it's disabled.
1537          *
1538          * If we are disabling calls, then disable all records that
1539          * are enabled.
1540          */
1541         if (update && (rec->flags & ~FTRACE_FL_MASK))
1542                 flag = FTRACE_FL_ENABLED;
1543
1544         /* If the state of this record hasn't changed, then do nothing */
1545         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1546                 return 0;
1547
1548         if (flag) {
1549                 rec->flags |= FTRACE_FL_ENABLED;
1550                 return ftrace_make_call(rec, ftrace_addr);
1551         }
1552
1553         rec->flags &= ~FTRACE_FL_ENABLED;
1554         return ftrace_make_nop(NULL, rec, ftrace_addr);
1555 }
1556
1557 static void ftrace_replace_code(int update)
1558 {
1559         struct dyn_ftrace *rec;
1560         struct ftrace_page *pg;
1561         int failed;
1562
1563         if (unlikely(ftrace_disabled))
1564                 return;
1565
1566         do_for_each_ftrace_rec(pg, rec) {
1567                 /* Skip over free records */
1568                 if (rec->flags & FTRACE_FL_FREE)
1569                         continue;
1570
1571                 failed = __ftrace_replace_code(rec, update);
1572                 if (failed) {
1573                         ftrace_bug(failed, rec->ip);
1574                         /* Stop processing */
1575                         return;
1576                 }
1577         } while_for_each_ftrace_rec();
1578 }
1579
1580 static int
1581 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1582 {
1583         unsigned long ip;
1584         int ret;
1585
1586         ip = rec->ip;
1587
1588         if (unlikely(ftrace_disabled))
1589                 return 0;
1590
1591         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1592         if (ret) {
1593                 ftrace_bug(ret, ip);
1594                 return 0;
1595         }
1596         return 1;
1597 }
1598
1599 /*
1600  * archs can override this function if they must do something
1601  * before the code modification is performed.
1602  */
1603 int __weak ftrace_arch_code_modify_prepare(void)
1604 {
1605         return 0;
1606 }
1607
1608 /*
1609  * archs can override this function if they must do something
1610  * after the code modification is performed.
1611  */
1612 int __weak ftrace_arch_code_modify_post_process(void)
1613 {
1614         return 0;
1615 }
1616
1617 static int __ftrace_modify_code(void *data)
1618 {
1619         int *command = data;
1620
1621         if (*command & FTRACE_UPDATE_CALLS)
1622                 ftrace_replace_code(1);
1623         else if (*command & FTRACE_DISABLE_CALLS)
1624                 ftrace_replace_code(0);
1625
1626         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1627                 ftrace_update_ftrace_func(ftrace_trace_function);
1628
1629         if (*command & FTRACE_START_FUNC_RET)
1630                 ftrace_enable_ftrace_graph_caller();
1631         else if (*command & FTRACE_STOP_FUNC_RET)
1632                 ftrace_disable_ftrace_graph_caller();
1633
1634         return 0;
1635 }
1636
1637 static void ftrace_run_update_code(int command)
1638 {
1639         int ret;
1640
1641         ret = ftrace_arch_code_modify_prepare();
1642         FTRACE_WARN_ON(ret);
1643         if (ret)
1644                 return;
1645
1646         stop_machine(__ftrace_modify_code, &command, NULL);
1647
1648         ret = ftrace_arch_code_modify_post_process();
1649         FTRACE_WARN_ON(ret);
1650 }
1651
1652 static ftrace_func_t saved_ftrace_func;
1653 static int ftrace_start_up;
1654 static int global_start_up;
1655
1656 static void ftrace_startup_enable(int command)
1657 {
1658         if (saved_ftrace_func != ftrace_trace_function) {
1659                 saved_ftrace_func = ftrace_trace_function;
1660                 command |= FTRACE_UPDATE_TRACE_FUNC;
1661         }
1662
1663         if (!command || !ftrace_enabled)
1664                 return;
1665
1666         ftrace_run_update_code(command);
1667 }
1668
1669 static int ftrace_startup(struct ftrace_ops *ops, int command)
1670 {
1671         bool hash_enable = true;
1672
1673         if (unlikely(ftrace_disabled))
1674                 return -ENODEV;
1675
1676         ftrace_start_up++;
1677         command |= FTRACE_UPDATE_CALLS;
1678
1679         /* ops marked global share the filter hashes */
1680         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1681                 ops = &global_ops;
1682                 /* Don't update hash if global is already set */
1683                 if (global_start_up)
1684                         hash_enable = false;
1685                 global_start_up++;
1686         }
1687
1688         ops->flags |= FTRACE_OPS_FL_ENABLED;
1689         if (hash_enable)
1690                 ftrace_hash_rec_enable(ops, 1);
1691
1692         ftrace_startup_enable(command);
1693
1694         return 0;
1695 }
1696
1697 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1698 {
1699         bool hash_disable = true;
1700
1701         if (unlikely(ftrace_disabled))
1702                 return;
1703
1704         ftrace_start_up--;
1705         /*
1706          * Just warn in case of imbalance; no need to kill ftrace. It's not
1707          * critical, but the ftrace_call callers may never be nopped again after
1708          * further ftrace uses.
1709          */
1710         WARN_ON_ONCE(ftrace_start_up < 0);
1711
1712         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1713                 ops = &global_ops;
1714                 global_start_up--;
1715                 WARN_ON_ONCE(global_start_up < 0);
1716                 /* Don't update hash if global still has users */
1717                 if (global_start_up) {
1718                         WARN_ON_ONCE(!ftrace_start_up);
1719                         hash_disable = false;
1720                 }
1721         }
1722
1723         if (hash_disable)
1724                 ftrace_hash_rec_disable(ops, 1);
1725
1726         if (ops != &global_ops || !global_start_up)
1727                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1728
1729         command |= FTRACE_UPDATE_CALLS;
1730
1731         if (saved_ftrace_func != ftrace_trace_function) {
1732                 saved_ftrace_func = ftrace_trace_function;
1733                 command |= FTRACE_UPDATE_TRACE_FUNC;
1734         }
1735
1736         if (!command || !ftrace_enabled)
1737                 return;
1738
1739         ftrace_run_update_code(command);
1740 }
1741
1742 static void ftrace_startup_sysctl(void)
1743 {
1744         if (unlikely(ftrace_disabled))
1745                 return;
1746
1747         /* Force update next time */
1748         saved_ftrace_func = NULL;
1749         /* ftrace_start_up is true if we want ftrace running */
1750         if (ftrace_start_up)
1751                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
1752 }
1753
1754 static void ftrace_shutdown_sysctl(void)
1755 {
1756         if (unlikely(ftrace_disabled))
1757                 return;
1758
1759         /* ftrace_start_up is true if ftrace is running */
1760         if (ftrace_start_up)
1761                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1762 }
1763
1764 static cycle_t          ftrace_update_time;
1765 static unsigned long    ftrace_update_cnt;
1766 unsigned long           ftrace_update_tot_cnt;
1767
1768 static int ops_traces_mod(struct ftrace_ops *ops)
1769 {
1770         struct ftrace_hash *hash;
1771
1772         hash = ops->filter_hash;
1773         return !!(!hash || !hash->count);
1774 }
1775
1776 static int ftrace_update_code(struct module *mod)
1777 {
1778         struct dyn_ftrace *p;
1779         cycle_t start, stop;
1780         unsigned long ref = 0;
1781
1782         /*
1783          * When adding a module, we need to check if tracers are
1784          * currently enabled and if they are set to trace all functions.
1785          * If they are, we need to enable the module functions as well
1786          * as update the reference counts for those function records.
1787          */
1788         if (mod) {
1789                 struct ftrace_ops *ops;
1790
1791                 for (ops = ftrace_ops_list;
1792                      ops != &ftrace_list_end; ops = ops->next) {
1793                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
1794                             ops_traces_mod(ops))
1795                                 ref++;
1796                 }
1797         }
1798
1799         start = ftrace_now(raw_smp_processor_id());
1800         ftrace_update_cnt = 0;
1801
1802         while (ftrace_new_addrs) {
1803
1804                 /* If something went wrong, bail without enabling anything */
1805                 if (unlikely(ftrace_disabled))
1806                         return -1;
1807
1808                 p = ftrace_new_addrs;
1809                 ftrace_new_addrs = p->newlist;
1810                 p->flags = ref;
1811
1812                 /*
1813                  * Do the initial record conversion from mcount jump
1814                  * to the NOP instructions.
1815                  */
1816                 if (!ftrace_code_disable(mod, p)) {
1817                         ftrace_free_rec(p);
1818                         /* Game over */
1819                         break;
1820                 }
1821
1822                 ftrace_update_cnt++;
1823
1824                 /*
1825                  * If tracing is enabled, go ahead and enable the record.
1826                  *
1827                  * The reason not to enable the record immediately is the
1828                  * inherent check of ftrace_make_nop/ftrace_make_call for
1829                  * correct previous instructions.  Doing the NOP conversion
1830                  * first puts the module into the correct state, thus
1831                  * passing the ftrace_make_call check.
1832                  */
1833                 if (ftrace_start_up && ref) {
1834                         int failed = __ftrace_replace_code(p, 1);
1835                         if (failed) {
1836                                 ftrace_bug(failed, p->ip);
1837                                 ftrace_free_rec(p);
1838                         }
1839                 }
1840         }
1841
1842         stop = ftrace_now(raw_smp_processor_id());
1843         ftrace_update_time = stop - start;
1844         ftrace_update_tot_cnt += ftrace_update_cnt;
1845
1846         return 0;
1847 }
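
/*
 * Editorial note: "ref" above becomes the record's initial flags value,
 * i.e. the number of currently enabled ops whose empty filter hash means
 * they trace every function, including those of the new module.
 */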
1848
1849 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1850 {
1851         struct ftrace_page *pg;
1852         int cnt;
1853         int i;
1854
1855         /* allocate a few pages */
1856         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1857         if (!ftrace_pages_start)
1858                 return -1;
1859
1860         /*
1861          * Allocate a few more pages.
1862          *
1863          * TODO: have some parser search vmlinux before
1864          *   final linking to find all calls to ftrace.
1865          *   Then we can:
1866          *    a) know how many pages to allocate.
1867          *     and/or
1868          *    b) set up the table then.
1869          *
1870          *  The dynamic code is still necessary for
1871          *  modules.
1872          */
1873
1874         pg = ftrace_pages = ftrace_pages_start;
1875
1876         cnt = num_to_init / ENTRIES_PER_PAGE;
1877         pr_info("ftrace: allocating %ld entries in %d pages\n",
1878                 num_to_init, cnt + 1);
1879
1880         for (i = 0; i < cnt; i++) {
1881                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1882
1883                 /* If we fail, we'll try later anyway */
1884                 if (!pg->next)
1885                         break;
1886
1887                 pg = pg->next;
1888         }
1889
1890         return 0;
1891 }
1892
1893 enum {
1894         FTRACE_ITER_FILTER      = (1 << 0),
1895         FTRACE_ITER_NOTRACE     = (1 << 1),
1896         FTRACE_ITER_PRINTALL    = (1 << 2),
1897         FTRACE_ITER_HASH        = (1 << 3),
1898         FTRACE_ITER_ENABLED     = (1 << 4),
1899 };
1900
1901 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1902
1903 struct ftrace_iterator {
1904         loff_t                          pos;
1905         loff_t                          func_pos;
1906         struct ftrace_page              *pg;
1907         struct dyn_ftrace               *func;
1908         struct ftrace_func_probe        *probe;
1909         struct trace_parser             parser;
1910         struct ftrace_hash              *hash;
1911         struct ftrace_ops               *ops;
1912         int                             hidx;
1913         int                             idx;
1914         unsigned                        flags;
1915 };
1916
1917 static void *
1918 t_hash_next(struct seq_file *m, loff_t *pos)
1919 {
1920         struct ftrace_iterator *iter = m->private;
1921         struct hlist_node *hnd = NULL;
1922         struct hlist_head *hhd;
1923
1924         (*pos)++;
1925         iter->pos = *pos;
1926
1927         if (iter->probe)
1928                 hnd = &iter->probe->node;
1929  retry:
1930         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1931                 return NULL;
1932
1933         hhd = &ftrace_func_hash[iter->hidx];
1934
1935         if (hlist_empty(hhd)) {
1936                 iter->hidx++;
1937                 hnd = NULL;
1938                 goto retry;
1939         }
1940
1941         if (!hnd)
1942                 hnd = hhd->first;
1943         else {
1944                 hnd = hnd->next;
1945                 if (!hnd) {
1946                         iter->hidx++;
1947                         goto retry;
1948                 }
1949         }
1950
1951         if (WARN_ON_ONCE(!hnd))
1952                 return NULL;
1953
1954         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1955
1956         return iter;
1957 }
1958
1959 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1960 {
1961         struct ftrace_iterator *iter = m->private;
1962         void *p = NULL;
1963         loff_t l;
1964
1965         if (iter->func_pos > *pos)
1966                 return NULL;
1967
1968         iter->hidx = 0;
1969         for (l = 0; l <= (*pos - iter->func_pos); ) {
1970                 p = t_hash_next(m, &l);
1971                 if (!p)
1972                         break;
1973         }
1974         if (!p)
1975                 return NULL;
1976
1977         /* Only set this if we have an item */
1978         iter->flags |= FTRACE_ITER_HASH;
1979
1980         return iter;
1981 }
1982
1983 static int
1984 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
1985 {
1986         struct ftrace_func_probe *rec;
1987
1988         rec = iter->probe;
1989         if (WARN_ON_ONCE(!rec))
1990                 return -EIO;
1991
1992         if (rec->ops->print)
1993                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1994
1995         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1996
1997         if (rec->data)
1998                 seq_printf(m, ":%p", rec->data);
1999         seq_putc(m, '\n');
2000
2001         return 0;
2002 }
2003
2004 static void *
2005 t_next(struct seq_file *m, void *v, loff_t *pos)
2006 {
2007         struct ftrace_iterator *iter = m->private;
2008         struct ftrace_ops *ops = &global_ops;
2009         struct dyn_ftrace *rec = NULL;
2010
2011         if (unlikely(ftrace_disabled))
2012                 return NULL;
2013
2014         if (iter->flags & FTRACE_ITER_HASH)
2015                 return t_hash_next(m, pos);
2016
2017         (*pos)++;
2018         iter->pos = iter->func_pos = *pos;
2019
2020         if (iter->flags & FTRACE_ITER_PRINTALL)
2021                 return t_hash_start(m, pos);
2022
2023  retry:
2024         if (iter->idx >= iter->pg->index) {
2025                 if (iter->pg->next) {
2026                         iter->pg = iter->pg->next;
2027                         iter->idx = 0;
2028                         goto retry;
2029                 }
2030         } else {
2031                 rec = &iter->pg->records[iter->idx++];
2032                 if ((rec->flags & FTRACE_FL_FREE) ||
2033
2034                     ((iter->flags & FTRACE_ITER_FILTER) &&
2035                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2036
2037                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2038                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2039
2040                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2041                      !(rec->flags & ~FTRACE_FL_MASK))) {
2042
2043                         rec = NULL;
2044                         goto retry;
2045                 }
2046         }
2047
2048         if (!rec)
2049                 return t_hash_start(m, pos);
2050
2051         iter->func = rec;
2052
2053         return iter;
2054 }
2055
2056 static void reset_iter_read(struct ftrace_iterator *iter)
2057 {
2058         iter->pos = 0;
2059         iter->func_pos = 0;
2060         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2061 }
2062
2063 static void *t_start(struct seq_file *m, loff_t *pos)
2064 {
2065         struct ftrace_iterator *iter = m->private;
2066         struct ftrace_ops *ops = &global_ops;
2067         void *p = NULL;
2068         loff_t l;
2069
2070         mutex_lock(&ftrace_lock);
2071
2072         if (unlikely(ftrace_disabled))
2073                 return NULL;
2074
2075         /*
2076          * If an lseek was done, then reset and start from the beginning.
2077          */
2078         if (*pos < iter->pos)
2079                 reset_iter_read(iter);
2080
2081         /*
2082          * For set_ftrace_filter reading, if the filter is empty
2083          * we can take a shortcut and just report that all
2084          * functions are enabled.
2085          */
2086         if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
2087                 if (*pos > 0)
2088                         return t_hash_start(m, pos);
2089                 iter->flags |= FTRACE_ITER_PRINTALL;
2090                 /* reset in case of seek/pread */
2091                 iter->flags &= ~FTRACE_ITER_HASH;
2092                 return iter;
2093         }
2094
2095         if (iter->flags & FTRACE_ITER_HASH)
2096                 return t_hash_start(m, pos);
2097
2098         /*
2099          * Unfortunately, we need to restart at ftrace_pages_start
2100          * every time we let go of ftrace_lock. This is because
2101          * those pointers can change without the lock held.
2102          */
2103         iter->pg = ftrace_pages_start;
2104         iter->idx = 0;
2105         for (l = 0; l <= *pos; ) {
2106                 p = t_next(m, p, &l);
2107                 if (!p)
2108                         break;
2109         }
2110
2111         if (!p) {
2112                 if (iter->flags & FTRACE_ITER_FILTER)
2113                         return t_hash_start(m, pos);
2114
2115                 return NULL;
2116         }
2117
2118         return iter;
2119 }
2120
2121 static void t_stop(struct seq_file *m, void *p)
2122 {
2123         mutex_unlock(&ftrace_lock);
2124 }
2125
2126 static int t_show(struct seq_file *m, void *v)
2127 {
2128         struct ftrace_iterator *iter = m->private;
2129         struct dyn_ftrace *rec;
2130
2131         if (iter->flags & FTRACE_ITER_HASH)
2132                 return t_hash_show(m, iter);
2133
2134         if (iter->flags & FTRACE_ITER_PRINTALL) {
2135                 seq_printf(m, "#### all functions enabled ####\n");
2136                 return 0;
2137         }
2138
2139         rec = iter->func;
2140
2141         if (!rec)
2142                 return 0;
2143
2144         seq_printf(m, "%ps", (void *)rec->ip);
2145         if (iter->flags & FTRACE_ITER_ENABLED)
2146                 seq_printf(m, " (%ld)",
2147                            rec->flags & ~FTRACE_FL_MASK);
2148         seq_printf(m, "\n");
2149
2150         return 0;
2151 }
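
/*
 * Editorial illustration: with FTRACE_ITER_ENABLED set (the
 * enabled_functions file) the output above looks roughly like
 *
 *   schedule (1)
 *   do_exit (2)
 *
 * where the number is the record's reference count, while
 * available_filter_functions lists just the symbol names.
 */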
2152
2153 static const struct seq_operations show_ftrace_seq_ops = {
2154         .start = t_start,
2155         .next = t_next,
2156         .stop = t_stop,
2157         .show = t_show,
2158 };
2159
2160 static int
2161 ftrace_avail_open(struct inode *inode, struct file *file)
2162 {
2163         struct ftrace_iterator *iter;
2164         int ret;
2165
2166         if (unlikely(ftrace_disabled))
2167                 return -ENODEV;
2168
2169         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2170         if (!iter)
2171                 return -ENOMEM;
2172
2173         iter->pg = ftrace_pages_start;
2174
2175         ret = seq_open(file, &show_ftrace_seq_ops);
2176         if (!ret) {
2177                 struct seq_file *m = file->private_data;
2178
2179                 m->private = iter;
2180         } else {
2181                 kfree(iter);
2182         }
2183
2184         return ret;
2185 }
2186
2187 static int
2188 ftrace_enabled_open(struct inode *inode, struct file *file)
2189 {
2190         struct ftrace_iterator *iter;
2191         int ret;
2192
2193         if (unlikely(ftrace_disabled))
2194                 return -ENODEV;
2195
2196         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2197         if (!iter)
2198                 return -ENOMEM;
2199
2200         iter->pg = ftrace_pages_start;
2201         iter->flags = FTRACE_ITER_ENABLED;
2202
2203         ret = seq_open(file, &show_ftrace_seq_ops);
2204         if (!ret) {
2205                 struct seq_file *m = file->private_data;
2206
2207                 m->private = iter;
2208         } else {
2209                 kfree(iter);
2210         }
2211
2212         return ret;
2213 }
2214
2215 static void ftrace_filter_reset(struct ftrace_hash *hash)
2216 {
2217         mutex_lock(&ftrace_lock);
2218         ftrace_hash_clear(hash);
2219         mutex_unlock(&ftrace_lock);
2220 }
2221
2222 static int
2223 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2224                   struct inode *inode, struct file *file)
2225 {
2226         struct ftrace_iterator *iter;
2227         struct ftrace_hash *hash;
2228         int ret = 0;
2229
2230         if (unlikely(ftrace_disabled))
2231                 return -ENODEV;
2232
2233         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2234         if (!iter)
2235                 return -ENOMEM;
2236
2237         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2238                 kfree(iter);
2239                 return -ENOMEM;
2240         }
2241
2242         if (flag & FTRACE_ITER_NOTRACE)
2243                 hash = ops->notrace_hash;
2244         else
2245                 hash = ops->filter_hash;
2246
2247         iter->ops = ops;
2248         iter->flags = flag;
2249
2250         if (file->f_mode & FMODE_WRITE) {
2251                 mutex_lock(&ftrace_lock);
2252                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2253                 mutex_unlock(&ftrace_lock);
2254
2255                 if (!iter->hash) {
2256                         trace_parser_put(&iter->parser);
2257                         kfree(iter);
2258                         return -ENOMEM;
2259                 }
2260         }
2261
2262         mutex_lock(&ftrace_regex_lock);
2263
2264         if ((file->f_mode & FMODE_WRITE) &&
2265             (file->f_flags & O_TRUNC))
2266                 ftrace_filter_reset(iter->hash);
2267
2268         if (file->f_mode & FMODE_READ) {
2269                 iter->pg = ftrace_pages_start;
2270
2271                 ret = seq_open(file, &show_ftrace_seq_ops);
2272                 if (!ret) {
2273                         struct seq_file *m = file->private_data;
2274                         m->private = iter;
2275                 } else {
2276                         /* Failed */
2277                         free_ftrace_hash(iter->hash);
2278                         trace_parser_put(&iter->parser);
2279                         kfree(iter);
2280                 }
2281         } else
2282                 file->private_data = iter;
2283         mutex_unlock(&ftrace_regex_lock);
2284
2285         return ret;
2286 }
2287
2288 static int
2289 ftrace_filter_open(struct inode *inode, struct file *file)
2290 {
2291         return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2292                                  inode, file);
2293 }
2294
2295 static int
2296 ftrace_notrace_open(struct inode *inode, struct file *file)
2297 {
2298         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2299                                  inode, file);
2300 }
2301
2302 static loff_t
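/*
 * Editorial note: the FMODE_READ check below matters because only
 * readable opens go through seq_open() in ftrace_regex_open(); for a
 * write-only open, file->private_data is not a seq_file (and may even
 * be NULL for users of this handler elsewhere), so passing it to
 * seq_lseek() would be an invalid dereference.
 */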
2303 ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
2304 {
2305         loff_t ret;
2306
2307         if (file->f_mode & FMODE_READ)
2308                 ret = seq_lseek(file, offset, origin);
2309         else
2310                 file->f_pos = ret = 1;
2311
2312         return ret;
2313 }
2314
2315 static int ftrace_match(char *str, char *regex, int len, int type)
2316 {
2317         int matched = 0;
2318         int slen;
2319
2320         switch (type) {
2321         case MATCH_FULL:
2322                 if (strcmp(str, regex) == 0)
2323                         matched = 1;
2324                 break;
2325         case MATCH_FRONT_ONLY:
2326                 if (strncmp(str, regex, len) == 0)
2327                         matched = 1;
2328                 break;
2329         case MATCH_MIDDLE_ONLY:
2330                 if (strstr(str, regex))
2331                         matched = 1;
2332                 break;
2333         case MATCH_END_ONLY:
2334                 slen = strlen(str);
2335                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2336                         matched = 1;
2337                 break;
2338         }
2339
2340         return matched;
2341 }
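
/*
 * Editorial illustration: assuming filter_parse_regex() strips the '*'
 * wildcards, the glob forms map to the match types handled above
 * roughly as
 *
 *   "sched_switch"  ->  MATCH_FULL         (exact string compare)
 *   "sched_*"       ->  MATCH_FRONT_ONLY   (prefix compare)
 *   "*switch*"      ->  MATCH_MIDDLE_ONLY  (substring search)
 *   "*_switch"      ->  MATCH_END_ONLY     (suffix compare)
 */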
2342
2343 static int
2344 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2345 {
2346         struct ftrace_func_entry *entry;
2347         int ret = 0;
2348
2349         entry = ftrace_lookup_ip(hash, rec->ip);
2350         if (not) {
2351                 /* Do nothing if it doesn't exist */
2352                 if (!entry)
2353                         return 0;
2354
2355                 free_hash_entry(hash, entry);
2356         } else {
2357                 /* Do nothing if it exists */
2358                 if (entry)
2359                         return 0;
2360
2361                 ret = add_hash_entry(hash, rec->ip);
2362         }
2363         return ret;
2364 }
2365
2366 static int
2367 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2368                     char *regex, int len, int type)
2369 {
2370         char str[KSYM_SYMBOL_LEN];
2371         char *modname;
2372
2373         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2374
2375         if (mod) {
2376                 /* module lookup requires matching the module */
2377                 if (!modname || strcmp(modname, mod))
2378                         return 0;
2379
2380                 /* blank search means to match all funcs in the mod */
2381                 if (!len)
2382                         return 1;
2383         }
2384
2385         return ftrace_match(str, regex, len, type);
2386 }
2387
2388 static int
2389 match_records(struct ftrace_hash *hash, char *buff,
2390               int len, char *mod, int not)
2391 {
2392         unsigned search_len = 0;
2393         struct ftrace_page *pg;
2394         struct dyn_ftrace *rec;
2395         int type = MATCH_FULL;
2396         char *search = buff;
2397         int found = 0;
2398         int ret;
2399
2400         if (len) {
2401                 type = filter_parse_regex(buff, len, &search, &not);
2402                 search_len = strlen(search);
2403         }
2404
2405         mutex_lock(&ftrace_lock);
2406
2407         if (unlikely(ftrace_disabled))
2408                 goto out_unlock;
2409
2410         do_for_each_ftrace_rec(pg, rec) {
2411
2412                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2413                         ret = enter_record(hash, rec, not);
2414                         if (ret < 0) {
2415                                 found = ret;
2416                                 goto out_unlock;
2417                         }
2418                         found = 1;
2419                 }
2420         } while_for_each_ftrace_rec();
2421  out_unlock:
2422         mutex_unlock(&ftrace_lock);
2423
2424         return found;
2425 }
2426
2427 static int
2428 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2429 {
2430         return match_records(hash, buff, len, NULL, 0);
2431 }
2432
2433 static int
2434 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2435 {
2436         int not = 0;
2437
2438         /* blank or '*' mean the same */
2439         if (strcmp(buff, "*") == 0)
2440                 buff[0] = 0;
2441
2442         /* handle the case of 'don't filter this module' */
2443         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2444                 buff[0] = 0;
2445                 not = 1;
2446         }
2447
2448         return match_records(hash, buff, strlen(buff), mod, not);
2449 }
2450
2451 /*
2452  * We register the module command as a template to show others how
2453  * to register a command as well; see the sketch after ftrace_mod_cmd_init().
2454  */
2455
2456 static int
2457 ftrace_mod_callback(struct ftrace_hash *hash,
2458                     char *func, char *cmd, char *param, int enable)
2459 {
2460         char *mod;
2461         int ret = -EINVAL;
2462
2463         /*
2464          * cmd == 'mod' because we only registered this func
2465          * for the 'mod' ftrace_func_command.
2466          * But if you register one func for multiple commands,
2467          * the cmd parameter tells you which command was
2468          * actually used.
2469          */
2470
2471         /* we must have a module name */
2472         if (!param)
2473                 return ret;
2474
2475         mod = strsep(&param, ":");
2476         if (!strlen(mod))
2477                 return ret;
2478
2479         ret = ftrace_match_module_records(hash, func, mod);
2480         if (!ret)
2481                 ret = -EINVAL;
2482         if (ret < 0)
2483                 return ret;
2484
2485         return 0;
2486 }
2487
2488 static struct ftrace_func_command ftrace_mod_cmd = {
2489         .name                   = "mod",
2490         .func                   = ftrace_mod_callback,
2491 };
2492
2493 static int __init ftrace_mod_cmd_init(void)
2494 {
2495         return register_ftrace_command(&ftrace_mod_cmd);
2496 }
2497 device_initcall(ftrace_mod_cmd_init);
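
/*
 * Editorial sketch following the "template" comment above: how another
 * ftrace_func_command could be wired up.  The "foo" command, its callback
 * and its initcall are hypothetical; only the registration pattern mirrors
 * ftrace_mod_cmd above.
 */
#if 0
static int
ftrace_foo_callback(struct ftrace_hash *hash,
                    char *func, char *cmd, char *param, int enable)
{
        /* cmd is "foo" here; param is the text after the second ':' */
        return 0;
}

static struct ftrace_func_command ftrace_foo_cmd = {
        .name                   = "foo",
        .func                   = ftrace_foo_callback,
};

static int __init ftrace_foo_cmd_init(void)
{
        /* 'echo func_name:foo:args > set_ftrace_filter' would then reach it */
        return register_ftrace_command(&ftrace_foo_cmd);
}
device_initcall(ftrace_foo_cmd_init);
#endif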
2498
2499 static void
2500 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2501 {
2502         struct ftrace_func_probe *entry;
2503         struct hlist_head *hhd;
2504         struct hlist_node *n;
2505         unsigned long key;
2506
2507         key = hash_long(ip, FTRACE_HASH_BITS);
2508
2509         hhd = &ftrace_func_hash[key];
2510
2511         if (hlist_empty(hhd))
2512                 return;
2513
2514         /*
2515          * Disable preemption for these calls to prevent an RCU grace
2516          * period from completing. This synchronizes hash iteration with
2517          * the freeing of hash items. rcu_read_lock is too dangerous here.
2518          */
2519         preempt_disable_notrace();
2520         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2521                 if (entry->ip == ip)
2522                         entry->ops->func(ip, parent_ip, &entry->data);
2523         }
2524         preempt_enable_notrace();
2525 }
2526
2527 static struct ftrace_ops trace_probe_ops __read_mostly =
2528 {
2529         .func           = function_trace_probe_call,
2530 };
2531
2532 static int ftrace_probe_registered;
2533
2534 static void __enable_ftrace_function_probe(void)
2535 {
2536         int ret;
2537         int i;
2538
2539         if (ftrace_probe_registered)
2540                 return;
2541
2542         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2543                 struct hlist_head *hhd = &ftrace_func_hash[i];
2544                 if (hhd->first)
2545                         break;
2546         }
2547         /* Nothing registered? */
2548         if (i == FTRACE_FUNC_HASHSIZE)
2549                 return;
2550
2551         ret = __register_ftrace_function(&trace_probe_ops);
2552         if (!ret)
2553                 ret = ftrace_startup(&trace_probe_ops, 0);
2554
2555         ftrace_probe_registered = 1;
2556 }
2557
2558 static void __disable_ftrace_function_probe(void)
2559 {
2560         int ret;
2561         int i;
2562
2563         if (!ftrace_probe_registered)
2564                 return;
2565
2566         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2567                 struct hlist_head *hhd = &ftrace_func_hash[i];
2568                 if (hhd->first)
2569                         return;
2570         }
2571
2572         /* no more funcs left */
2573         ret = __unregister_ftrace_function(&trace_probe_ops);
2574         if (!ret)
2575                 ftrace_shutdown(&trace_probe_ops, 0);
2576
2577         ftrace_probe_registered = 0;
2578 }
2579
2580
2581 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2582 {
2583         struct ftrace_func_probe *entry =
2584                 container_of(rhp, struct ftrace_func_probe, rcu);
2585
2586         if (entry->ops->free)
2587                 entry->ops->free(&entry->data);
2588         kfree(entry);
2589 }
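
/*
 * Editorial note: entries freed here are queued with call_rcu_sched() in
 * __unregister_ftrace_function_probe() below, which pairs with the
 * preempt_disable_notrace() section in function_trace_probe_call() above:
 * a sched-RCU grace period cannot end while a CPU sits in that
 * non-preemptible region, so hash iteration never races with the kfree().
 */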
2590
2591
2592 int
2593 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2594                               void *data)
2595 {
2596         struct ftrace_func_probe *entry;
2597         struct ftrace_page *pg;
2598         struct dyn_ftrace *rec;
2599         int type, len, not;
2600         unsigned long key;
2601         int count = 0;
2602         char *search;
2603
2604         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2605         len = strlen(search);
2606
2607         /* we do not support '!' for function probes */
2608         if (WARN_ON(not))
2609                 return -EINVAL;
2610
2611         mutex_lock(&ftrace_lock);
2612
2613         if (unlikely(ftrace_disabled))
2614                 goto out_unlock;
2615
2616         do_for_each_ftrace_rec(pg, rec) {
2617
2618                 if (!ftrace_match_record(rec, NULL, search, len, type))
2619                         continue;
2620
2621                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2622                 if (!entry) {
2623                         /* If we did not process any, then return error */
2624                         if (!count)
2625                                 count = -ENOMEM;
2626                         goto out_unlock;
2627                 }
2628
2629                 count++;
2630
2631                 entry->data = data;
2632
2633                 /*
2634                  * The caller might want to do something special
2635                  * for each function we find. We call the callback
2636                  * to give the caller an opportunity to do so.
2637                  */
2638                 if (ops->callback) {
2639                         if (ops->callback(rec->ip, &entry->data) < 0) {
2640                                 /* caller does not like this func */
2641                                 kfree(entry);
2642                                 continue;
2643                         }
2644                 }
2645
2646                 entry->ops = ops;
2647                 entry->ip = rec->ip;
2648
2649                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2650                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2651
2652         } while_for_each_ftrace_rec();
2653         __enable_ftrace_function_probe();
2654
2655  out_unlock:
2656         mutex_unlock(&ftrace_lock);
2657
2658         return count;
2659 }
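
/*
 * Editorial sketch: the shape of a probe a caller might hand to
 * register_ftrace_function_probe().  The names are hypothetical; the
 * ->func signature matches the call made from function_trace_probe_call()
 * above.
 */
#if 0
static void
my_probe_handler(unsigned long ip, unsigned long parent_ip, void **data)
{
        /* runs on every hit of a matched function's mcount entry */
}

static struct ftrace_probe_ops my_probe_ops = {
        .func   = my_probe_handler,
};

/* register_ftrace_function_probe("sched_*", &my_probe_ops, NULL); */
#endif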
2660
2661 enum {
2662         PROBE_TEST_FUNC         = 1,
2663         PROBE_TEST_DATA         = 2
2664 };
2665
2666 static void
2667 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2668                                   void *data, int flags)
2669 {
2670         struct ftrace_func_probe *entry;
2671         struct hlist_node *n, *tmp;
2672         char str[KSYM_SYMBOL_LEN];
2673         int type = MATCH_FULL;
2674         int i, len = 0;
2675         char *search;
2676
2677         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2678                 glob = NULL;
2679         else if (glob) {
2680                 int not;
2681
2682                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2683                 len = strlen(search);
2684
2685                 /* we do not support '!' for function probes */
2686                 if (WARN_ON(not))
2687                         return;
2688         }
2689
2690         mutex_lock(&ftrace_lock);
2691         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2692                 struct hlist_head *hhd = &ftrace_func_hash[i];
2693
2694                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2695
2696                         /* break up if statements for readability */
2697                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2698                                 continue;
2699
2700                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2701                                 continue;
2702
2703                         /* do this last, since it is the most expensive */
2704                         if (glob) {
2705                                 kallsyms_lookup(entry->ip, NULL, NULL,
2706                                                 NULL, str);
2707                                 if (!ftrace_match(str, glob, len, type))
2708                                         continue;
2709                         }
2710
2711                         hlist_del_rcu(&entry->node);
2712                         call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
2713                 }
2714         }
2715         __disable_ftrace_function_probe();
2716         mutex_unlock(&ftrace_lock);
2717 }
2718
2719 void
2720 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2721                                 void *data)
2722 {
2723         __unregister_ftrace_function_probe(glob, ops, data,
2724                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2725 }
2726
2727 void
2728 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2729 {
2730         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2731 }
2732
2733 void unregister_ftrace_function_probe_all(char *glob)
2734 {
2735         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2736 }
2737
2738 static LIST_HEAD(ftrace_commands);
2739 static DEFINE_MUTEX(ftrace_cmd_mutex);
2740
2741 int register_ftrace_command(struct ftrace_func_command *cmd)
2742 {
2743         struct ftrace_func_command *p;
2744         int ret = 0;
2745
2746         mutex_lock(&ftrace_cmd_mutex);
2747         list_for_each_entry(p, &ftrace_commands, list) {
2748                 if (strcmp(cmd->name, p->name) == 0) {
2749                         ret = -EBUSY;
2750                         goto out_unlock;
2751                 }
2752         }
2753         list_add(&cmd->list, &ftrace_commands);
2754  out_unlock:
2755         mutex_unlock(&ftrace_cmd_mutex);
2756
2757         return ret;
2758 }
2759
2760 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2761 {
2762         struct ftrace_func_command *p, *n;
2763         int ret = -ENODEV;
2764
2765         mutex_lock(&ftrace_cmd_mutex);
2766         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2767                 if (strcmp(cmd->name, p->name) == 0) {
2768                         ret = 0;
2769                         list_del_init(&p->list);
2770                         goto out_unlock;
2771                 }
2772         }
2773  out_unlock:
2774         mutex_unlock(&ftrace_cmd_mutex);
2775
2776         return ret;
2777 }
2778
2779 static int ftrace_process_regex(struct ftrace_hash *hash,
2780                                 char *buff, int len, int enable)
2781 {
2782         char *func, *command, *next = buff;
2783         struct ftrace_func_command *p;
2784         int ret = -EINVAL;
2785
2786         func = strsep(&next, ":");
2787
2788         if (!next) {
2789                 ret = ftrace_match_records(hash, func, len);
2790                 if (!ret)
2791                         ret = -EINVAL;
2792                 if (ret < 0)
2793                         return ret;
2794                 return 0;
2795         }
2796
2797         /* command found */
2798
2799         command = strsep(&next, ":");
2800
2801         mutex_lock(&ftrace_cmd_mutex);
2802         list_for_each_entry(p, &ftrace_commands, list) {
2803                 if (strcmp(p->name, command) == 0) {
2804                         ret = p->func(hash, func, command, next, enable);
2805                         goto out_unlock;
2806                 }
2807         }
2808  out_unlock:
2809         mutex_unlock(&ftrace_cmd_mutex);
2810
2811         return ret;
2812 }
2813
2814 static ssize_t
2815 ftrace_regex_write(struct file *file, const char __user *ubuf,
2816                    size_t cnt, loff_t *ppos, int enable)
2817 {
2818         struct ftrace_iterator *iter;
2819         struct trace_parser *parser;
2820         ssize_t ret, read;
2821
2822         if (!cnt)
2823                 return 0;
2824
2825         mutex_lock(&ftrace_regex_lock);
2826
2827         ret = -ENODEV;
2828         if (unlikely(ftrace_disabled))
2829                 goto out_unlock;
2830
2831         if (file->f_mode & FMODE_READ) {
2832                 struct seq_file *m = file->private_data;
2833                 iter = m->private;
2834         } else
2835                 iter = file->private_data;
2836
2837         parser = &iter->parser;
2838         read = trace_get_user(parser, ubuf, cnt, ppos);
2839
2840         if (read >= 0 && trace_parser_loaded(parser) &&
2841             !trace_parser_cont(parser)) {
2842                 ret = ftrace_process_regex(iter->hash, parser->buffer,
2843                                            parser->idx, enable);
2844                 trace_parser_clear(parser);
2845                 if (ret)
2846                         goto out_unlock;
2847         }
2848
2849         ret = read;
2850 out_unlock:
2851         mutex_unlock(&ftrace_regex_lock);
2852
2853         return ret;
2854 }
2855
2856 static ssize_t
2857 ftrace_filter_write(struct file *file, const char __user *ubuf,
2858                     size_t cnt, loff_t *ppos)
2859 {
2860         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2861 }
2862
2863 static ssize_t
2864 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2865                      size_t cnt, loff_t *ppos)
2866 {
2867         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2868 }
2869
2870 static int
2871 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2872                  int reset, int enable)
2873 {
2874         struct ftrace_hash **orig_hash;
2875         struct ftrace_hash *hash;
2876         int ret;
2877
2878         /* All global ops use the global ops filters */
2879         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2880                 ops = &global_ops;
2881
2882         if (unlikely(ftrace_disabled))
2883                 return -ENODEV;
2884
2885         if (enable)
2886                 orig_hash = &ops->filter_hash;
2887         else
2888                 orig_hash = &ops->notrace_hash;
2889
2890         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2891         if (!hash)
2892                 return -ENOMEM;
2893
2894         mutex_lock(&ftrace_regex_lock);
2895         if (reset)
2896                 ftrace_filter_reset(hash);
2897         if (buf)
2898                 ftrace_match_records(hash, buf, len);
2899
2900         mutex_lock(&ftrace_lock);
2901         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
2902         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
2903             && ftrace_enabled)
2904                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2905
2906         mutex_unlock(&ftrace_lock);
2907
2908         mutex_unlock(&ftrace_regex_lock);
2909
2910         free_ftrace_hash(hash);
2911         return ret;
2912 }
2913
2914 /**
2915  * ftrace_set_filter - set a function to filter on in ftrace
2916  * @ops: the ops to set the filter with
2917  * @buf: the string that holds the function filter text.
2918  * @len: the length of the string.
2919  * @reset: non-zero to reset all filters before applying this filter.
2920  *
2921  * Filters denote which functions should be enabled when tracing is enabled.
2922  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2923  */
2924 void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2925   &