/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
        unsigned int ret;

        if (in_nmi()) /* not supported yet */
                return 1;

        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
                /*
                 * Since some bpf program is already running on this cpu,
                 * don't call into another bpf program (same or different)
                 * and don't send a kprobe event into the ring buffer,
                 * so return zero here.
                 */
                ret = 0;
                goto out;
        }

        /*
         * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
         * to all call sites, we check with bpf_prog_array_valid() there
         * whether call->prog_array is empty or not, which is a heuristic
         * to speed up execution.
         *
         * If the prog_array fetched by bpf_prog_array_valid() was non-NULL,
         * we enter trace_call_bpf() and do the proper rcu_dereference()
         * under the RCU lock. If it turns out that prog_array is NULL, we
         * bail out. Conversely, if the fetched pointer was NULL, the
         * prog_array is skipped, at the accepted risk of missing events
         * that were added between that check and the rcu_dereference().
         */
        ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
        __this_cpu_dec(bpf_prog_active);

        return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
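/*
 * Illustrative sketch, not part of this file: per the comment above, a
 * typical caller such as a kprobe handler is expected to gate the call on
 * bpf_prog_array_valid() and honor the 0/1 return convention, roughly
 * along these lines (the surrounding handler is hypothetical):
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;		(event was filtered out, skip the buffer)
 *	...store the kprobe event into the ring buffer...
 */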
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
        int ret = 0;

        if (unlikely(size == 0))
                goto out;

        ret = probe_kernel_read(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);
 out:
        return ret;
}
static const struct bpf_func_proto bpf_probe_read_proto = {
        .func           = bpf_probe_read,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_ANYTHING,
};
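/*
 * Illustrative sketch, not part of this file: from the BPF program side
 * this helper copies arbitrary kernel memory into a program-owned buffer,
 * e.g. in a kprobe program (names below are hypothetical):
 *
 *	struct sk_buff *skb = (void *)PT_REGS_PARM1(ctx);
 *	unsigned int len = 0;
 *
 *	bpf_probe_read(&len, sizeof(len), &skb->len);
 */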
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
           u32, size)
{
        /*
         * Ensure we're in user context which is safe for the helper to
         * run. This helper has no business in a kthread.
         *
         * access_ok() should prevent writing to non-user memory, but in
         * some situations (nommu, temporary switch, etc) access_ok() does
         * not provide enough validation, hence the check on KERNEL_DS.
         */

        if (unlikely(in_interrupt() ||
                     current->flags & (PF_KTHREAD | PF_EXITING)))
                return -EPERM;
        if (unlikely(uaccess_kernel()))
                return -EPERM;
        if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
                return -EPERM;

        return probe_kernel_write(unsafe_ptr, src, size);
}
static const struct bpf_func_proto bpf_probe_write_user_proto = {
        .func           = bpf_probe_write_user,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
        .arg2_type      = ARG_PTR_TO_MEM,
        .arg3_type      = ARG_CONST_SIZE,
};
static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
        pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
                            current->comm, task_pid_nr(current));

        return &bpf_probe_write_user_proto;
}
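/*
 * Illustrative sketch, not part of this file: bpf_probe_write_user() is
 * meant for experiments where a program running in user context (e.g. a
 * kprobe on a syscall entry) patches memory of the current task, roughly
 * (user_ptr and new_value are hypothetical):
 *
 *	bpf_probe_write_user(user_ptr, &new_value, sizeof(new_value));
 *
 * where user_ptr was taken from the probed syscall's arguments.
 */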
/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
           u64, arg2, u64, arg3)
{
        bool str_seen = false;

        /*
         * bpf_check()->check_func_arg()->check_stack_boundary()
         * guarantees that fmt points to the bpf program stack and that
         * fmt_size bytes of it were initialized, with fmt_size > 0.
         */
        if (fmt[--fmt_size] != 0)
                return -EINVAL;

        /* check format string for allowed specifiers */
        for (i = 0; i < fmt_size; i++) {
                if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
                        return -EINVAL;

                /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
                } else if (fmt[i] == 'p' || fmt[i] == 's') {
                        if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
                                return -EINVAL;
                        if (fmt[i - 1] == 's') {
                                /* allow only one '%s' per fmt string */
                                strncpy_from_unsafe(buf,
                                                    (void *) (long) unsafe_addr,
                                                    sizeof(buf));

                if (fmt[i] != 'i' && fmt[i] != 'd' &&
                    fmt[i] != 'u' && fmt[i] != 'x')
                        return -EINVAL;
/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32- and 64-bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)						\
        __trace_printk(1 /* Fake ip will not be printed. */,	\
                       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
        ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
          ? __BPF_TP(arg1, ##__VA_ARGS__)				\
          : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
              ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
              : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
        ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
          ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
          : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
              ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
              : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
        ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
          ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
          : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
              ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
              : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

        return __BPF_TP_EMIT();
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
        .func           = bpf_trace_printk,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_MEM,
        .arg2_type      = ARG_CONST_SIZE,
};
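/*
 * Illustrative sketch, not part of this file: the format string must live
 * on the BPF program stack and may only use the specifiers listed above,
 * e.g. (comm and pid are hypothetical program variables):
 *
 *	char fmt[] = "comm %s pid %d\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), comm, pid);
 *
 * The output shows up in /sys/kernel/debug/tracing/trace_pipe.
 */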
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
        /*
         * This program might be calling bpf_trace_printk,
         * so allocate per-cpu printk buffers.
         */
        trace_printk_init_buffers();

        return &bpf_trace_printk_proto;
}
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
                     u64 *value, u64 *enabled, u64 *running)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
        struct bpf_event_entry *ee;

        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
                return -EINVAL;
        if (index == BPF_F_CURRENT_CPU)
                index = cpu;
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;

        ee = READ_ONCE(array->ptrs[index]);
        if (!ee)
                return -ENOENT;

        return perf_event_read_local(ee->event, value, enabled, running);
}
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
        u64 value = 0;
        int err;

        err = get_map_perf_counter(map, flags, &value, NULL, NULL);
        /*
         * This API is ugly since we miss the [-22..-2] range of valid
         * counter values, but that's uapi.
         */
        if (err)
                return err;
        return value;
}
static const struct bpf_func_proto bpf_perf_event_read_proto = {
        .func           = bpf_perf_event_read,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
};
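/*
 * Illustrative sketch, not part of this file: with a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY populated with counters, a program reads
 * the counter slot for the current CPU (the map name is hypothetical):
 *
 *	u64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *
 * Errors come back folded into the u64 return value, which is the uapi
 * wart noted in the comment above.
 */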
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
           struct bpf_perf_event_value *, buf, u32, size)
{
        int err = -EINVAL;

        if (unlikely(size != sizeof(struct bpf_perf_event_value)))
                goto clear;
        err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
                                   &buf->running);
        if (unlikely(err))
                goto clear;
        return 0;
clear:
        memset(buf, 0, size);
        return err;
}
static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
        .func           = bpf_perf_event_read_value,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg4_type      = ARG_CONST_SIZE,
};
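/*
 * Illustrative sketch, not part of this file: the _value variant avoids
 * the error-folding problem above by returning the counter together with
 * its enabled/running times through a caller buffer (the map name is
 * hypothetical):
 *
 *	struct bpf_perf_event_value v = {};
 *
 *	if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *				       &v, sizeof(v)))
 *		...use v.counter, v.enabled, v.running...
 */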
static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
                        u64 flags, struct perf_raw_record *raw)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
        unsigned int cpu = smp_processor_id();
        u64 index = flags & BPF_F_INDEX_MASK;
        struct bpf_event_entry *ee;
        struct perf_event *event;

        if (index == BPF_F_CURRENT_CPU)
                index = cpu;
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;

        ee = READ_ONCE(array->ptrs[index]);
        if (!ee)
                return -ENOENT;

        event = ee->event;
        if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
                     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
                return -EINVAL;

        if (unlikely(event->oncpu != cpu))
                return -EOPNOTSUPP;

        perf_sample_data_init(sd, 0, 0);
        sd->raw = raw;
        perf_event_output(event, sd, regs);

        return 0;
}
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
{
        struct perf_raw_record raw = {
                .frag = {
                        .size = size,
                        .data = data,
                },
        };

        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
                return -EINVAL;

        return __bpf_perf_event_output(regs, map, flags, &raw);
}
static const struct bpf_func_proto bpf_perf_event_output_proto = {
        .func           = bpf_perf_event_output,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_MEM,
        .arg5_type      = ARG_CONST_SIZE,
};
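/*
 * Illustrative sketch, not part of this file: a program pushes a raw
 * sample into a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot, typically the one for
 * the current CPU (the struct and map name are hypothetical):
 *
 *	struct { u32 pid; u64 ts; } ev = { .pid = pid, .ts = ts };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &ev, sizeof(ev));
 *
 * User space then reads the samples from the perf ring buffer mmap'ed
 * from the corresponding perf event fd.
 */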
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
        struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
        struct perf_raw_frag frag = {
                .copy   = ctx_copy,
                .size   = ctx_size,
                .data   = ctx,
        };
        struct perf_raw_record raw = {
                .frag = {
                        {
                                .next   = ctx_size ? &frag : NULL,
                        },
                        .size   = meta_size,
                        .data   = meta,
                },
        };

        perf_fetch_caller_regs(regs);

        return __bpf_perf_event_output(regs, map, flags, &raw);
}
BPF_CALL_0(bpf_get_current_task)
{
        return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
        .func           = bpf_get_current_task,
        .ret_type       = RET_INTEGER,
};
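/*
 * Illustrative sketch, not part of this file: the returned value is the
 * current task_struct pointer cast to an integer; fields still have to be
 * fetched with bpf_probe_read():
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	int prio = 0;
 *
 *	bpf_probe_read(&prio, sizeof(prio), &task->prio);
 */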
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct cgroup *cgrp;

        if (unlikely(in_interrupt()))
                return -EINVAL;
        if (unlikely(idx >= array->map.max_entries))
                return -E2BIG;

        cgrp = READ_ONCE(array->ptrs[idx]);
        if (unlikely(!cgrp))
                return -EAGAIN;

        return task_under_cgroup_hierarchy(current, cgrp);
}
static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
        .func           = bpf_current_task_under_cgroup,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
           const void *, unsafe_ptr)
{
        int ret;

        /*
         * The strncpy_from_unsafe() call will likely not fill the entire
         * buffer, but that's okay in this circumstance as we're probing
         * arbitrary memory anyway, similar to bpf_probe_read(), and might
         * as well probe the stack. Thus, memory is explicitly cleared
         * only in the error case, so that improper users who ignore the
         * return code altogether don't copy garbage; otherwise the length
         * of the string is returned, which can be used for
         * bpf_perf_event_output() et al.
         */
        ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);

        return ret;
}
static const struct bpf_func_proto bpf_probe_read_str_proto = {
        .func           = bpf_probe_read_str,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE,
        .arg3_type      = ARG_ANYTHING,
};
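/*
 * Illustrative sketch, not part of this file: the returned length
 * (including the trailing NUL) can be fed straight into
 * bpf_perf_event_output() as mentioned in the comment above (buffer, map
 * and source pointer names are hypothetical):
 *
 *	char name[64];
 *	int len = bpf_probe_read_str(name, sizeof(name), filename_ptr);
 *
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      name, len);
 */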
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
        switch (func_id) {
        case BPF_FUNC_map_lookup_elem:
                return &bpf_map_lookup_elem_proto;
        case BPF_FUNC_map_update_elem:
                return &bpf_map_update_elem_proto;
        case BPF_FUNC_map_delete_elem:
                return &bpf_map_delete_elem_proto;
        case BPF_FUNC_probe_read:
                return &bpf_probe_read_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
        case BPF_FUNC_tail_call:
                return &bpf_tail_call_proto;
        case BPF_FUNC_get_current_pid_tgid:
                return &bpf_get_current_pid_tgid_proto;
        case BPF_FUNC_get_current_task:
                return &bpf_get_current_task_proto;
        case BPF_FUNC_get_current_uid_gid:
                return &bpf_get_current_uid_gid_proto;
        case BPF_FUNC_get_current_comm:
                return &bpf_get_current_comm_proto;
        case BPF_FUNC_trace_printk:
                return bpf_get_trace_printk_proto();
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
        case BPF_FUNC_get_numa_node_id:
                return &bpf_get_numa_node_id_proto;
        case BPF_FUNC_perf_event_read:
                return &bpf_perf_event_read_proto;
        case BPF_FUNC_probe_write_user:
                return bpf_get_probe_write_proto();
        case BPF_FUNC_current_task_under_cgroup:
                return &bpf_current_task_under_cgroup_proto;
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
        case BPF_FUNC_probe_read_str:
                return &bpf_probe_read_str_proto;
        default:
                return NULL;
        }
}
static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto;
        case BPF_FUNC_perf_event_read_value:
                return &bpf_perf_event_read_value_proto;
        default:
                return tracing_func_proto(func_id);
        }
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
                                        struct bpf_insn_access_aux *info)
{
        if (off < 0 || off >= sizeof(struct pt_regs))
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;
        /*
         * Assertion for 32 bit to make sure the last 8 byte access
         * (BPF_DW) to the last 4 byte member is disallowed.
         */
        if (off + size > sizeof(struct pt_regs))
                return false;

        return true;
}
const struct bpf_verifier_ops kprobe_verifier_ops = {
        .get_func_proto         = kprobe_prog_func_proto,
        .is_valid_access        = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
{
        struct pt_regs *regs = *(struct pt_regs **)tp_buff;

        /*
         * r1 points to the perf tracepoint buffer, where the first 8 bytes
         * are hidden from the bpf program and contain a pointer to
         * 'struct pt_regs'. Fetch it from there and call the same
         * bpf_perf_event_output() helper inline.
         */
        return ____bpf_perf_event_output(regs, map, flags, data, size);
}
static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
        .func           = bpf_perf_event_output_tp,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_PTR_TO_MEM,
        .arg5_type      = ARG_CONST_SIZE,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
           u64, flags)
{
        struct pt_regs *regs = *(struct pt_regs **)tp_buff;

        /*
         * Same comment as in bpf_perf_event_output_tp(), only that this
         * time the other helper's function body cannot be inlined due to
         * being external, thus we need to call the raw helper function.
         */
        return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
                               flags, 0, 0);
}
static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
        .func           = bpf_get_stackid_tp,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};
BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
           struct bpf_perf_event_value *, buf, u32, size)
{
        int err = -EINVAL;

        if (unlikely(size != sizeof(struct bpf_perf_event_value)))
                goto clear;
        err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
                                    &buf->running);
        if (unlikely(err))
                goto clear;
        return 0;
clear:
        memset(buf, 0, size);
        return err;
}
static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
        .func           = bpf_perf_prog_read_value_tp,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE,
};
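/*
 * Illustrative sketch, not part of this file: a BPF_PROG_TYPE_PERF_EVENT
 * program reads the value of the very event it is attached to:
 *
 *	struct bpf_perf_event_value v = {};
 *
 *	if (!bpf_perf_prog_read_value(ctx, &v, sizeof(v)))
 *		...use v.counter, v.enabled, v.running...
 */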
static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
        switch (func_id) {
        case BPF_FUNC_perf_event_output:
                return &bpf_perf_event_output_proto_tp;
        case BPF_FUNC_get_stackid:
                return &bpf_get_stackid_proto_tp;
        case BPF_FUNC_perf_prog_read_value:
                return &bpf_perf_prog_read_value_proto_tp;
        default:
                return tracing_func_proto(func_id);
        }
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
                                    struct bpf_insn_access_aux *info)
{
        if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
        return true;
}
const struct bpf_verifier_ops tracepoint_verifier_ops = {
        .get_func_proto         = tp_prog_func_proto,
        .is_valid_access        = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
                                    struct bpf_insn_access_aux *info)
{
        const int size_sp = FIELD_SIZEOF(struct bpf_perf_event_data,
                                         sample_period);

        if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;

        switch (off) {
        case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
                bpf_ctx_record_field_size(info, size_sp);
                if (!bpf_ctx_narrow_access_ok(off, size, size_sp))
                        return false;
                break;
        default:
                if (size != sizeof(long))
                        return false;
        }

        return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
                                      const struct bpf_insn *si,
                                      struct bpf_insn *insn_buf,
                                      struct bpf_prog *prog, u32 *target_size)
{
        struct bpf_insn *insn = insn_buf;

        switch (si->off) {
        case offsetof(struct bpf_perf_event_data, sample_period):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
                                                       data), si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_perf_event_data_kern, data));
                *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
                                      bpf_target_off(struct perf_sample_data, period, 8,
                                                     target_size));
                break;
        default:
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
                                                       regs), si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_perf_event_data_kern, regs));
                *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
                                      si->off);
                break;
        }

        return insn - insn_buf;
}
const struct bpf_verifier_ops perf_event_verifier_ops = {
        .get_func_proto         = tp_prog_func_proto,
        .is_valid_access        = pe_prog_is_valid_access,
        .convert_ctx_access     = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};
static DEFINE_MUTEX(bpf_event_mutex);
int perf_event_attach_bpf_prog(struct perf_event *event,
                               struct bpf_prog *prog)
{
        struct bpf_prog_array __rcu *old_array;
        struct bpf_prog_array *new_array;
        int ret = -EEXIST;

        mutex_lock(&bpf_event_mutex);

        if (event->prog)
                goto unlock;

        old_array = event->tp_event->prog_array;
        ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
        if (ret < 0)
                goto unlock;

        /* set the new array to event->tp_event and set event->prog */
        event->prog = prog;
        rcu_assign_pointer(event->tp_event->prog_array, new_array);
        bpf_prog_array_free(old_array);

unlock:
        mutex_unlock(&bpf_event_mutex);
        return ret;
}
void perf_event_detach_bpf_prog(struct perf_event *event)
{
        struct bpf_prog_array __rcu *old_array;
        struct bpf_prog_array *new_array;
        int ret;

        mutex_lock(&bpf_event_mutex);

        if (!event->prog)
                goto unlock;

        old_array = event->tp_event->prog_array;
        ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
        if (ret < 0) {
                bpf_prog_array_delete_safe(old_array, event->prog);
        } else {
                rcu_assign_pointer(event->tp_event->prog_array, new_array);
                bpf_prog_array_free(old_array);
        }

        bpf_prog_put(event->prog);
        event->prog = NULL;

unlock:
        mutex_unlock(&bpf_event_mutex);
}
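/*
 * Illustrative sketch, not part of this file: user space reaches
 * perf_event_attach_bpf_prog()/perf_event_detach_bpf_prog() through the
 * perf ioctl interface, roughly (loading details elided, the program and
 * event setup below are hypothetical):
 *
 *	int prog_fd = bpf_load_program(...);
 *	int ev_fd = perf_event_open(&attr, pid, cpu, -1, 0);
 *
 *	ioctl(ev_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *	...
 *	close(ev_fd);	(tearing down the event detaches the program)
 */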