// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};

const struct bpf_prog_ops bpf_extension_prog_ops = {
};
/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)
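/* Rough sizing math behind the numbers above: ~22k attachable functions
 * spread over 1 << 10 = 1024 buckets is ~22 entries per bucket on average,
 * which is cheap for a lookup that happens once per attach.
 */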
static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);
void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}
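/* Note: bpf_trampoline_update() below treats this page as two halves and
 * only ever rewrites the half that is currently unused, so leaving the page
 * writeable does not mean live trampoline code is patched in place.
 */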
struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	void *image;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
	image = bpf_jit_alloc_exec_page();
	if (!image) {
		kfree(tr);
		tr = NULL;
		goto out;
	}

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
	tr->image = image;
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}
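/* The 'key' passed to bpf_trampoline_lookup() is derived by the verifier
 * from the attach target (its BTF id, combined with the target program's
 * id for extension programs), so all programs attaching to the same target
 * share a single trampoline.
 */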
static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
	return ret;
}
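/* To summarize the three helpers above: when the target function's entry is
 * an ftrace-managed site, attachment goes through ftrace's direct-call API
 * so it can coexist with other ftrace users; otherwise the entry nop is
 * patched directly with bpf_arch_text_poke().
 */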
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86. Pick a number to fit into PAGE_SIZE / 2.
 */
#define BPF_MAX_TRAMP_PROGS 40
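/* Illustrative bound check (assuming 4K pages): 40 progs * ~50 bytes is
 * ~2000 bytes, which fits in PAGE_SIZE / 2 = 2048.
 */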
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
	void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
	struct bpf_prog *progs_to_run[BPF_MAX_TRAMP_PROGS];
	int fentry_cnt = tr->progs_cnt[BPF_TRAMP_FENTRY];
	int fexit_cnt = tr->progs_cnt[BPF_TRAMP_FEXIT];
	struct bpf_prog **progs, **fentry, **fexit;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	struct bpf_prog_aux *aux;
	int err;

	if (fentry_cnt + fexit_cnt == 0) {
		err = unregister_fentry(tr, old_image);
		tr->selector = 0;
		goto out;
	}

	/* populate fentry progs */
	fentry = progs = progs_to_run;
	hlist_for_each_entry(aux, &tr->progs_hlist[BPF_TRAMP_FENTRY], tramp_hlist)
		*progs++ = aux->prog;

	/* populate fexit progs */
	fexit = progs;
	hlist_for_each_entry(aux, &tr->progs_hlist[BPF_TRAMP_FEXIT], tramp_hlist)
		*progs++ = aux->prog;

	if (fexit_cnt)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	/* Though the second half of trampoline page is unused a task could be
	 * preempted in the middle of the first half of trampoline and two
	 * updates to trampoline would change the code from underneath the
	 * preempted task. Hence wait for tasks to voluntarily schedule or go
	 * to userspace.
	 */
	synchronize_rcu_tasks();

	err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
					  &tr->func.model, flags,
					  fentry, fentry_cnt,
					  fexit, fexit_cnt,
					  tr->func.addr);
	if (err < 0)
		goto out;

	if (tr->selector)
		/* progs already running at this address */
		err = modify_fentry(tr, old_image, new_image);
	else
		/* first time registering */
		err = register_fentry(tr, new_image);
	if (err)
		goto out;
	tr->selector++;
out:
	return err;
}
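/* Worked example of the half-page flip above: with tr->selector == 0 the
 * first update writes the lower half and register_fentry() points the target
 * at it; the next update (selector == 1) writes the upper half and
 * modify_fentry() switches callers over, so a task preempted inside the old
 * half keeps executing valid code, and synchronize_rcu_tasks() runs before
 * that half is ever rewritten again.
 */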
static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(enum bpf_attach_type t)
{
	switch (t) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	default:
		return BPF_TRAMP_REPLACE;
	}
}
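/* Note on the default case above: only fentry/fexit have dedicated attach
 * types here; BPF_PROG_TYPE_EXT programs carry no specific
 * expected_attach_type and fall through to BPF_TRAMP_REPLACE.
 */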
int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_trampoline *tr;
	int err = 0;
	int cnt;

	tr = prog->aux->trampoline;
	kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(prog->aux->trampoline);
	if (err) {
		hlist_del(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}
/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_trampoline *tr;
	int err;

	tr = prog->aux->trampoline;
	kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(prog->aux->trampoline);
out:
	mutex_unlock(&tr->mutex);
	return err;
}
void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	/* wait for tasks to get out of trampoline before freeing it */
	synchronize_rcu_tasks();
	bpf_jit_free_exec(tr->image);
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}
/* The logic is similar to BPF_PROG_RUN, but with the explicit rcu_read_lock()
 * and preempt_disable() that the trampoline needs. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 */
u64 notrace __bpf_prog_enter(void)
{
	u64 start = 0;

	rcu_read_lock();
	preempt_disable();
	if (static_branch_unlikely(&bpf_stats_enabled_key))
		start = sched_clock();
	return start;
}
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter
	     * and disabled in __bpf_prog_exit.
	     * And vice versa.
	     * Hence check that 'start' is not zero.
	     */
	    start) {
		stats = this_cpu_ptr(prog->aux->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
	}
	preempt_enable();
	rcu_read_unlock();
}
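/* For reference, the code emitted by arch_prepare_bpf_trampoline() invokes
 * the pair above roughly as:
 *	start = __bpf_prog_enter();
 *	prog->bpf_func(args, prog->insnsi);
 *	__bpf_prog_exit(prog, start);
 * i.e. the split-open form of BPF_PROG_RUN described above.
 */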
int __weak
arch_prepare_bpf_trampoline(void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_prog **fentry_progs, int fentry_cnt,
			    struct bpf_prog **fexit_progs, int fexit_cnt,
			    void *orig_call)
{
	return -ENOTSUPP;
}
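/* Weak stub: architectures that support BPF trampolines (x86-64 at the time
 * of writing) override this with a real implementation in their JIT.
 */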
static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);