}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
-static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
+static u32 sk_filter_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
+ int off;
- switch (ctx_off) {
+ switch (si->off) {
case offsetof(struct __sk_buff, len):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, len));
break;
case offsetof(struct __sk_buff, protocol):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, protocol));
break;
case offsetof(struct __sk_buff, vlan_proto):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, vlan_proto));
break;
case offsetof(struct __sk_buff, priority):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, priority));
else
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, priority));
break;
case offsetof(struct __sk_buff, ingress_ifindex):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, skb_iif));
break;
case offsetof(struct __sk_buff, ifindex):
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
- dst_reg, src_reg,
+ si->dst_reg, si->src_reg,
offsetof(struct sk_buff, dev));
- *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct net_device, ifindex));
break;
case offsetof(struct __sk_buff, hash):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, hash));
break;
case offsetof(struct __sk_buff, mark):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, mark));
else
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, mark));
break;
case offsetof(struct __sk_buff, pkt_type):
- return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);
+ return convert_skb_access(SKF_AD_PKTTYPE, si->dst_reg,
+ si->src_reg, insn);
case offsetof(struct __sk_buff, queue_mapping):
- return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn);
+ return convert_skb_access(SKF_AD_QUEUE, si->dst_reg,
+ si->src_reg, insn);
case offsetof(struct __sk_buff, vlan_present):
return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
- dst_reg, src_reg, insn);
+ si->dst_reg, si->src_reg, insn);
case offsetof(struct __sk_buff, vlan_tci):
return convert_skb_access(SKF_AD_VLAN_TAG,
- dst_reg, src_reg, insn);
+ si->dst_reg, si->src_reg, insn);
case offsetof(struct __sk_buff, cb[0]) ...
offsetof(struct __sk_buff, cb[4]):
BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
prog->cb_access = 1;
- ctx_off -= offsetof(struct __sk_buff, cb[0]);
- ctx_off += offsetof(struct sk_buff, cb);
- ctx_off += offsetof(struct qdisc_skb_cb, data);
+ off = si->off;
+ off -= offsetof(struct __sk_buff, cb[0]);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct qdisc_skb_cb, data);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+ *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg,
+ si->src_reg, off);
else
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg,
+ si->src_reg, off);
break;
case offsetof(struct __sk_buff, tc_classid):
- ctx_off -= offsetof(struct __sk_buff, tc_classid);
- ctx_off += offsetof(struct sk_buff, cb);
- ctx_off += offsetof(struct qdisc_skb_cb, tc_classid);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
+
+ off = si->off;
+ off -= offsetof(struct __sk_buff, tc_classid);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct qdisc_skb_cb, tc_classid);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
+ *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
+ si->src_reg, off);
else
- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
+ si->src_reg, off);
break;
case offsetof(struct __sk_buff, data):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
- dst_reg, src_reg,
+ si->dst_reg, si->src_reg,
offsetof(struct sk_buff, data));
break;
case offsetof(struct __sk_buff, data_end):
- ctx_off -= offsetof(struct __sk_buff, data_end);
- ctx_off += offsetof(struct sk_buff, cb);
- ctx_off += offsetof(struct bpf_skb_data_end, data_end);
- *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), dst_reg, src_reg,
- ctx_off);
+ off = si->off;
+ off -= offsetof(struct __sk_buff, data_end);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct bpf_skb_data_end, data_end);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
+ si->src_reg, off);
break;
case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg,
+ *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, tc_index));
else
- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, tc_index));
- break;
#else
if (type == BPF_WRITE)
- *insn++ = BPF_MOV64_REG(dst_reg, dst_reg);
+ *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
else
- *insn++ = BPF_MOV64_IMM(dst_reg, 0);
- break;
+ *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
+ break;
}
return insn - insn_buf;
}
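
For reference, the si argument that replaces the old dst_reg/src_reg/ctx_off triple is simply the original context-access instruction, so the callbacks now read the register numbers and offset out of its fields (and can later consult code or imm without another interface change). The struct is the one from include/uapi/linux/bpf.h:

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};
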
static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
- int dst_reg, int src_reg,
- int ctx_off,
+ const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
- switch (ctx_off) {
+ switch (si->off) {
case offsetof(struct bpf_sock, bound_dev_if):
BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, sk_bound_dev_if));
else
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, sk_bound_dev_if));
break;
case offsetof(struct bpf_sock, family):
BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);
- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
offsetof(struct sock, sk_family));
break;
case offsetof(struct bpf_sock, type):
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, __sk_flags_offset));
- *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, SK_FL_TYPE_MASK);
- *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, SK_FL_TYPE_SHIFT);
+ *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
break;
case offsetof(struct bpf_sock, protocol):
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, __sk_flags_offset));
- *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, SK_FL_PROTO_MASK);
- *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, SK_FL_PROTO_SHIFT);
+ *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
break;
}
return insn - insn_buf;
}
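
The callbacks above all follow the same pattern: map an offset into the user-visible context struct (__sk_buff, bpf_sock) onto the corresponding offset into the real kernel object (sk_buff, sock). A minimal user-space sketch of that per-field translation, using hypothetical stand-in structs rather than the kernel types:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins, NOT the kernel types: the layout a
 * program sees, and the real object backing it. */
struct user_ctx { int len; int mark; };
struct kern_obj { char hdr[16]; int len; int mark; };

/* Translate a user-visible offset to the real one, as the
 * switch statements above do per field. */
static long convert_off(long off)
{
	switch (off) {
	case offsetof(struct user_ctx, len):
		return offsetof(struct kern_obj, len);
	case offsetof(struct user_ctx, mark):
		return offsetof(struct kern_obj, mark);
	default:
		return -1;	/* field not exposed */
	}
}

int main(void)
{
	printf("len:  %zu -> %ld\n", offsetof(struct user_ctx, len),
	       convert_off(offsetof(struct user_ctx, len)));
	printf("mark: %zu -> %ld\n", offsetof(struct user_ctx, mark),
	       convert_off(offsetof(struct user_ctx, mark)));
	return 0;
}
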
-static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
+static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
- switch (ctx_off) {
+ switch (si->off) {
case offsetof(struct __sk_buff, ifindex):
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
- dst_reg, src_reg,
+ si->dst_reg, si->src_reg,
offsetof(struct sk_buff, dev));
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct net_device, ifindex));
break;
default:
- return sk_filter_convert_ctx_access(type, dst_reg, src_reg,
- ctx_off, insn_buf, prog);
+ return sk_filter_convert_ctx_access(type, si, insn_buf, prog);
}
return insn - insn_buf;
}
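
Side note on the ifindex handling: unlike the sk_filter variant above, which emits a BPF_JEQ guard so that a NULL skb->dev yields 0, the tc_cls_act variant loads dev->ifindex unconditionally, presumably because skb->dev is always set on the tc paths where these programs run. In C-like pseudocode, the two emitted sequences are:

/* sk_filter: guarded against NULL skb->dev */
dst = *(void **)(src + offsetof(struct sk_buff, dev));
if (dst == NULL) goto out;	/* skip the dereference; dst stays 0 */
dst = *(u32 *)(dst + offsetof(struct net_device, ifindex));
out:

/* tc_cls_act: unconditional two-load sequence */
dst = *(void **)(src + offsetof(struct sk_buff, dev));
dst = *(u32 *)(dst + offsetof(struct net_device, ifindex));
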
-static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
+static u32 xdp_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
- switch (ctx_off) {
+ switch (si->off) {
case offsetof(struct xdp_md, data):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
- dst_reg, src_reg,
+ si->dst_reg, si->src_reg,
offsetof(struct xdp_buff, data));
break;
case offsetof(struct xdp_md, data_end):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
- dst_reg, src_reg,
+ si->dst_reg, si->src_reg,
offsetof(struct xdp_buff, data_end));
break;
}