struct seccomp_filter {
atomic_t usage;
struct seccomp_filter *prev;
- unsigned short len; /* Instruction count */
- struct sock_filter_int insnsi[];
+ struct sk_filter *prog;
};
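
Instead of embedding the instruction array in the filter node as a
flexible array member, each node now points at a separately allocated
struct sk_filter, the same program container that the socket-filter
core and its JITs already operate on. For orientation, a simplified
view of the sk_filter fields this patch relies on (a sketch, not the
verbatim definition, which also carries a refcount, an RCU head, a
jited flag and more):

/* Sketch only: the sk_filter fields seccomp uses below; see
 * include/linux/filter.h of this era for the real definition. */
struct sk_filter_sketch {
	unsigned int len;			/* instruction count */
	unsigned int (*bpf_func)(const struct sk_buff *skb,
				 const struct sock_filter_int *insnsi);
	struct sock_filter_int insnsi[];	/* internal BPF program */
};
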
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

/*
 * All filters in the list are evaluated and the lowest BPF return
 * value always takes priority (ignoring the DATA).
 */
for (f = current->seccomp.filter; f; f = f->prev) {
- u32 cur_ret = sk_run_filter_int_seccomp(&sd, f->insnsi);
+ u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
+
if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
ret = cur_ret;
}
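
The action values are laid out so that the numerically smallest action
is the most severe, which is why keeping the minimum across the chain
lets the most restrictive filter win while its low DATA bits ride
along. A small userspace demonstration, with the constants copied from
include/uapi/linux/seccomp.h of this period:

#include <stdio.h>

#define SECCOMP_RET_KILL	0x00000000U
#define SECCOMP_RET_TRAP	0x00030000U
#define SECCOMP_RET_ERRNO	0x00050000U
#define SECCOMP_RET_ALLOW	0x7fff0000U
#define SECCOMP_RET_ACTION	0x7fff0000U
#define SECCOMP_RET_DATA	0x0000ffffU

int main(void)
{
	unsigned int ret = SECCOMP_RET_ALLOW;
	/* a stacked filter votes "fail with errno 13 (EACCES)" */
	unsigned int cur_ret = SECCOMP_RET_ERRNO | 13;

	if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
		ret = cur_ret;

	printf("action=%#x data=%u\n",
	       ret & SECCOMP_RET_ACTION, ret & SECCOMP_RET_DATA);
	/* prints: action=0x50000 data=13 */
	return 0;
}
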
if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
return -EINVAL;
for (filter = current->seccomp.filter; filter; filter = filter->prev)
- total_insns += filter->len + 4; /* include a 4 instr penalty */
+ total_insns += filter->prog->len + 4; /* include a 4 instr penalty */
if (total_insns > MAX_INSNS_PER_PATH)
return -ENOMEM;
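
Worked out: sizeof(struct sock_filter) is 8 bytes, so
MAX_INSNS_PER_PATH is (1 << 18) / 8 = 32768 classic-BPF instructions,
i.e. 256KB of filter text along any path from the newest filter back
to the root. The "+ 4" appears to charge each ancestor filter a fixed
per-call overhead, so even a deep chain of tiny filters consumes the
budget.
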
/* Allocate a new seccomp_filter */
ret = -ENOMEM;
- filter = kzalloc(sizeof(struct seccomp_filter) +
- sizeof(struct sock_filter_int) * new_len,
+ filter = kzalloc(sizeof(struct seccomp_filter),
GFP_KERNEL|__GFP_NOWARN);
if (!filter)
goto free_prog;
- ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len);
- if (ret)
+ filter->prog = kzalloc(sk_filter_size(new_len),
+ GFP_KERNEL|__GFP_NOWARN);
+ if (!filter->prog)
goto free_filter;
+
+ ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
+ if (ret)
+ goto free_filter_prog;
kfree(fp);
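
kfree(fp) can drop the classic-BPF copy as soon as sk_convert_filter()
has emitted the internal-BPF image into filter->prog->insnsi. The
program header and that instruction array share a single allocation;
sk_filter_size() is assumed to size it along these lines (a sketch of
the include/linux/filter.h helper, not the verbatim macro):

/* Sketch, assuming the program body is a flexible array at the end of
 * struct sk_filter: the header plus proglen 8-byte instruction slots,
 * but never less than the bare header. */
#define sk_filter_size_sketch(proglen)			\
	(max(sizeof(struct sk_filter),			\
	     offsetof(struct sk_filter, insnsi[proglen])))
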
atomic_set(&filter->usage, 1);
- filter->len = new_len;
+ filter->prog->len = new_len;
+ filter->prog->bpf_func = (void *)sk_run_filter_int_seccomp;
+
+ /* JIT internal BPF into native HW instructions */
+ bpf_int_jit_compile(filter->prog);
/*
 * If there is an existing filter, make it the prev and don't drop its
 * task reference.
 */
filter->prev = current->seccomp.filter;
current->seccomp.filter = filter;
return 0;
+free_filter_prog:
+ kfree(filter->prog);
free_filter:
kfree(filter);
free_prog:
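
Seeding bpf_func with the internal-BPF interpreter before calling
bpf_int_jit_compile() keeps the JIT transparent: on architectures with
an internal-BPF JIT the compile step repoints bpf_func at native code,
elsewhere it is effectively a no-op and the interpreter keeps running.
SK_RUN_FILTER() is then just an indirect call through that pointer,
roughly (paraphrasing the era's include/linux/filter.h):

#define SK_RUN_FILTER(filter, ctx) \
	(*(filter)->bpf_func)(ctx, (filter)->insnsi)

Note also that the unwind labels mirror the allocation order:
free_filter_prog releases the newest allocation first, then
free_filter the node, and free_prog the kernel copy of the classic
program.
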
while (orig && atomic_dec_and_test(&orig->usage)) {
struct seccomp_filter *freeme = orig;
orig = orig->prev;
+ bpf_jit_free(freeme->prog);
kfree(freeme);
}
}
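
Release walks the chain iteratively rather than recursively: every
node holds one reference on its predecessor, so the walk stops at the
first filter still shared with another task, and bpf_jit_free()
releases the program (and any JIT image) before the node itself is
freed. A hypothetical userspace model of the loop, with invented names
and a plain int standing in for atomic_t:

#include <stdlib.h>

struct node {
	int usage;		/* models atomic_t usage */
	struct node *prev;
};

static void put_chain(struct node *orig)
{
	while (orig && --orig->usage == 0) {
		struct node *freeme = orig;

		orig = orig->prev;
		/* kernel: bpf_jit_free(freeme->prog) goes here */
		free(freeme);
	}
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));
	struct node *child = calloc(1, sizeof(*child));

	root->usage = 2;	/* referenced by child and by another task */
	child->usage = 1;
	child->prev = root;

	put_chain(child);	/* frees child, shared root survives */
	put_chain(root);	/* the other task exits: root goes too */
	return 0;
}
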