diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 04b24876cd23c83c9502afc60853c871ee3fee13..13551e62350148942949006100a1b66b71b7b8e8 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -978,6 +978,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
        return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
 }
 
+static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
+{
+       const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+       return reg->type == PTR_TO_CTX;
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
                                   const struct bpf_reg_state *reg,
                                   int off, int size, bool strict)
@@ -1258,6 +1265,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
                return -EACCES;
        }
 
+       if (is_ctx_reg(env, insn->dst_reg)) {
+               verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
+                       insn->dst_reg);
+               return -EACCES;
+       }
+
        /* check whether atomic_add can read the memory */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_READ, -1);
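This mirrors the new BPF_ST check further down: an atomic add whose destination register holds the context pointer is now refused before check_mem_access() is even consulted. A minimal sketch of a fragment the verifier now rejects, written with the insn macros from include/linux/filter.h (register choice and offset are illustrative):

	struct bpf_insn xadd_into_ctx[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		/* R1 holds ctx on entry; rejected with
		 * "BPF_XADD stores into R1 context is not allowed"
		 */
		BPF_STX_XADD(BPF_W, BPF_REG_1, BPF_REG_0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};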
@@ -1729,6 +1742,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
        err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
        if (err)
                return err;
+       if (func_id == BPF_FUNC_tail_call) {
+               if (meta.map_ptr == NULL) {
+                       verbose(env, "verifier bug\n");
+                       return -EINVAL;
+               }
+               env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
+       }
        err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
        if (err)
                return err;
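bpf_tail_call() takes the prog-array map as its second argument, so meta.map_ptr is filled in by the check_func_arg() call for BPF_REG_2 just above; stashing it in insn_aux_data is what later lets fixup_bpf_calls() (last hunk below) know which array a given tail call targets when it patches in the bounds check and mask. A rough sketch of the usual tail-call sequence whose map pointer gets recorded here, where prog_array_fd is a hypothetical fd of a BPF_MAP_TYPE_PROG_ARRAY map:

	struct bpf_insn do_tail_call[] = {
		/* R1 already holds ctx (arg1) */
		BPF_LD_MAP_FD(BPF_REG_2, prog_array_fd),	/* arg2: prog array */
		BPF_MOV64_IMM(BPF_REG_3, 3),			/* arg3: index */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, 0),			/* falls through on failure */
		BPF_EXIT_INSN(),
	};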
@@ -1875,17 +1895,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        dst_reg = &regs[dst];
 
-       if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
-               print_verifier_state(env, env->cur_state);
-               verbose(env,
-                       "verifier internal error: known but bad sbounds\n");
-               return -EINVAL;
-       }
-       if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
-               print_verifier_state(env, env->cur_state);
-               verbose(env,
-                       "verifier internal error: known but bad ubounds\n");
-               return -EINVAL;
+       if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
+           smin_val > smax_val || umin_val > umax_val) {
+               /* Taint dst register if offset had invalid bounds derived from
+                * e.g. dead branches.
+                */
+               __mark_reg_unknown(dst_reg);
+               return 0;
        }
 
        if (BPF_CLASS(insn->code) != BPF_ALU64) {
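The old WARN_ON_ONCE() treated contradictory bounds as a verifier bug, but they can be reached legitimately: the verifier still walks branches that are dead at runtime, and the bounds accumulated on such a path may be impossible to satisfy. Tainting dst_reg keeps verification going without splatting a warning. A rough, non-loadable fragment showing how a dead path can end up with umin_value > umax_value on the offset register (R2 is assumed to be an unknown scalar, R1 a pointer; jump offsets are illustrative):

	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 5, 4),	/* fall-through: R2 <= 5 */
	BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 8, 3),	/* fall-through: R2 > 8, dead */
	/* on the dead fall-through path R2 carries umin_value 9 > umax_value 5;
	 * the pointer ALU below now taints R1 instead of warning
	 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),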
@@ -2077,6 +2093,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        src_known = tnum_is_const(src_reg.var_off);
        dst_known = tnum_is_const(dst_reg->var_off);
 
+       if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
+           smin_val > smax_val || umin_val > umax_val) {
+               /* Taint dst register if offset had invalid bounds derived from
+                * e.g. dead branches.
+                */
+               __mark_reg_unknown(dst_reg);
+               return 0;
+       }
+
        if (!src_known &&
            opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
                __mark_reg_unknown(dst_reg);
@@ -2486,6 +2511,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                        return -EINVAL;
                }
 
+               if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
+                       verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
+                       return -EINVAL;
+               }
+
                if ((opcode == BPF_LSH || opcode == BPF_RSH ||
                     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
                        int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
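BPF_ARSH is only implemented for the 64-bit ALU class at this point (the interpreter has no 32-bit arithmetic-shift handler), so the BPF_ALU form is rejected up front, before the shift-amount range check below. A sketch of the rejected versus accepted encodings:

	BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 3),	/* BPF_ALU | BPF_ARSH: rejected */
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 3),	/* BPF_ALU64 | BPF_ARSH: allowed */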
@@ -3981,6 +4011,12 @@ static int do_check(struct bpf_verifier_env *env)
                        if (err)
                                return err;
 
+                       if (is_ctx_reg(env, insn->dst_reg)) {
+                               verbose(env, "BPF_ST stores into R%d context is not allowed\n",
+                                       insn->dst_reg);
+                               return -EACCES;
+                       }
+
                        /* check that memory (dst_reg + off) is writeable */
                        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
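Context accesses are rewritten by the verifier's ctx-access conversion, which handles loads and BPF_STX stores but not immediate BPF_ST stores, so a BPF_ST into ctx (like the XADD case earlier) has to be rejected here instead of slipping through unconverted. A minimal sketch of a fragment that now fails:

	/* R1 holds ctx on entry; rejected with
	 * "BPF_ST stores into R1 context is not allowed"
	 */
	BPF_ST_MEM(BPF_W, BPF_REG_1, 0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),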
@@ -4433,6 +4469,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
        int i, cnt, delta = 0;
 
        for (i = 0; i < insn_cnt; i++, insn++) {
+               if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+                   insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+                       /* due to JIT bugs clear upper 32-bits of src register
+                        * before div/mod operation
+                        */
+                       insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
+                       insn_buf[1] = *insn;
+                       cnt = 2;
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta    += cnt - 1;
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+                       continue;
+               }
+
                if (insn->code != (BPF_JMP | BPF_CALL))
                        continue;
 
@@ -4456,6 +4510,35 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                         */
                        insn->imm = 0;
                        insn->code = BPF_JMP | BPF_TAIL_CALL;
+
+                       /* instead of changing every JIT dealing with tail_call
+                        * emit two extra insns:
+                        * if (index >= max_entries) goto out;
+                        * index &= array->index_mask;
+                        * to avoid out-of-bounds cpu speculation
+                        */
+                       map_ptr = env->insn_aux_data[i + delta].map_ptr;
+                       if (map_ptr == BPF_MAP_PTR_POISON) {
+                               verbose(env, "tail_call abusing map_ptr\n");
+                               return -EINVAL;
+                       }
+                       if (!map_ptr->unpriv_array)
+                               continue;
+                       insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
+                                                 map_ptr->max_entries, 2);
+                       insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
+                                                   container_of(map_ptr,
+                                                                struct bpf_array,
+                                                                map)->index_mask);
+                       insn_buf[2] = *insn;
+                       cnt = 3;
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta    += cnt - 1;
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
                        continue;
                }
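For unprivileged programs the index in R3 is thus clamped twice: architecturally by the new BPF_JGE, and under (mispredicted) speculation by the BPF_AND with index_mask, the power-of-two-minus-one mask kept in struct bpf_array by the companion change. For a hypothetical prog array with max_entries == 4 (index_mask == 3), the patched sequence the JIT sees is roughly:

	BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 4, 2),		/* index >= max_entries: skip call */
	BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 3),		/* clamp index even if mispredicted */
	BPF_RAW_INSN(BPF_JMP | BPF_TAIL_CALL, 0, 0, 0, 0),	/* the rewritten helper call */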