TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}
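+/* Kernel-internal return codes for the smap verdict path. SK_REDIRECT
+ * is removed from the UAPI enum sk_action below, so a redirect verdict
+ * exists only in this private namespace.
+ */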
+enum __sk_action {
+ __SK_DROP = 0,
+ __SK_PASS,
+ __SK_REDIRECT,
+};
+
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
int rc;
if (unlikely(!prog))
- return SK_DROP;
+ return __SK_DROP;
skb_orphan(skb);
/* We need to ensure that BPF metadata for maps is also cleared
 * when we orphan the skb, so the program does not see a stale
 * map pointer.
 */
TCP_SKB_CB(skb)->bpf.map = NULL;
skb->sk = psock->sock;
bpf_compute_data_end(skb);
preempt_disable();
rc = (*prog->bpf_func)(skb, prog->insnsi);
preempt_enable();
skb->sk = NULL;
+ /* Moving return codes from UAPI namespace into internal namespace */
return rc == SK_PASS ?
- (TCP_SKB_CB(skb)->bpf.map ? SK_REDIRECT : SK_PASS) : SK_DROP;
+ (TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
+ __SK_DROP;
}
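For illustration, a minimal sk_skb verdict program written against this
scheme (a sketch using current libbpf conventions; the map name sock_map,
the program name, and the fixed key are assumptions, not part of this
patch). The program only ever returns the UAPI codes: it requests a
redirect by calling bpf_sk_redirect_map() and returning its result, which
is SK_PASS on success; smap_verdict_func() above then sees
TCP_SKB_CB(skb)->bpf.map set and translates that into __SK_REDIRECT.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, int);
} sock_map SEC(".maps");

SEC("sk_skb/verdict")
int verdict_prog(struct __sk_buff *skb)
{
	int key = 0; /* hypothetical fixed slot */

	/* Returns SK_PASS on success; the kernel upgrades this to
	 * __SK_REDIRECT internally because bpf.map is now non-NULL.
	 */
	return bpf_sk_redirect_map(skb, &sock_map, key, 0);
}

char _license[] SEC("license") = "GPL";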
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
struct sock *sk;
int rc;

rc = smap_verdict_func(psock, skb);
switch (rc) {
- case SK_REDIRECT:
+ case __SK_REDIRECT:
sk = do_sk_redirect_map(skb);
if (likely(sk)) {
struct smap_psock *peer = smap_psock_sk(sk);

if (likely(peer &&
    test_bit(SMAP_TX_RUNNING, &peer->state) &&
    !sock_flag(sk, SOCK_DEAD) &&
    sock_writeable(sk))) {
skb_set_owner_w(skb, sk);
skb_queue_tail(&peer->rxqueue, skb);
schedule_work(&peer->tx_work);
break;
}
}
/* Fall through and free skb otherwise */
- case SK_DROP:
+ case __SK_DROP:
default:
kfree_skb(skb);
}
}
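The redirect above only goes through when do_sk_redirect_map() finds a
live, writable peer in the map; otherwise the skb falls through to
kfree_skb(). A user-space sketch of wiring that up (object and program
names are hypothetical and carried over from the example above;
BPF_SK_SKB_STREAM_VERDICT is the attach point that binds a verdict
program to a sockmap):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Attach the verdict program to the sockmap and install two connected
 * TCP sockets so verdicts can redirect traffic between them.
 */
int setup(int sock_a, int sock_b)
{
	struct bpf_object *obj = bpf_object__open_file("sockmap_kern.o", NULL);
	struct bpf_program *prog;
	int map_fd, prog_fd, key;

	if (!obj || bpf_object__load(obj))
		return -1;

	map_fd = bpf_object__find_map_fd_by_name(obj, "sock_map");
	prog = bpf_object__find_program_by_name(obj, "verdict_prog");
	prog_fd = bpf_program__fd(prog);

	if (bpf_prog_attach(prog_fd, map_fd, BPF_SK_SKB_STREAM_VERDICT, 0))
		return -1;

	key = 0;
	if (bpf_map_update_elem(map_fd, &key, &sock_a, BPF_ANY))
		return -1;
	key = 1;
	return bpf_map_update_elem(map_fd, &key, &sock_b, BPF_ANY);
}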
* int bpf_sk_redirect_map(skb, map, key, flags)
* Redirect skb to a sock in map using key as a lookup key for the
* sock in map.
* @map: pointer to sockmap
* @key: key to lookup sock in map
* @flags: reserved for future use
- * Return: SK_REDIRECT
+ * Return: SK_PASS
*
* int bpf_sock_map_update(skops, map, key, flags)
* @skops: pointer to bpf_sock_ops
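Where bpf_sock_map_update() fits, as a sketch: a sockops program placed
in the same BPF object as the verdict program above can insert each
socket into sock_map once its connection is established (the map name
and key choice are assumptions, as before).

SEC("sockops")
int sockops_prog(struct bpf_sock_ops *skops)
{
	int key = 0; /* hypothetical slot */

	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* flags follow bpf_map_update_elem() semantics */
		bpf_sock_map_update(skops, &sock_map, &key, BPF_ANY);
		break;
	}
	return 0;
}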
enum sk_action {
SK_DROP = 0,
SK_PASS,
- SK_REDIRECT,
};
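Because SK_REDIRECT disappears from enum sk_action, a program that
hard-coded it no longer builds against the new header. The pattern that
keeps working is to return the helper's result directly, e.g. inside a
verdict program (names as in the sketches above):

	/* Previously: bpf_sk_redirect_map(...); return SK_REDIRECT;
	 * SK_REDIRECT is no longer defined, so instead return the
	 * helper's own result: SK_PASS on success, SK_DROP on error.
	 */
	return bpf_sk_redirect_map(skb, &sock_map, key, 0);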
#define BPF_TAG_SIZE 8