enum {
- SKB_FCLONE_UNAVAILABLE,
- SKB_FCLONE_ORIG,
- SKB_FCLONE_CLONE,
+ SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
+ SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
+ SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
+ SKB_FCLONE_FREE, /* this companion fclone skb is available */
};
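/* Reference sketch, not part of the patch: the states above tag the two
 * sk_buffs carved out of a single skbuff_fclone_cache object. Assuming the
 * 3.18-era definition in include/linux/skbuff.h, that container looks like
 * this (the field annotations are mine):
 */
struct sk_buff_fclones {
	struct sk_buff	skb1;		/* the "orig" skb handed back to the caller */
	struct sk_buff	skb2;		/* companion, SKB_FCLONE_FREE until skb_clone() grabs it */
	atomic_t	fclone_ref;	/* 1 while only skb1 is live, 2 once skb2 is a clone */
};
/* The remaining hunks appear to be from net/core/skbuff.c:
 * __alloc_skb(), kfree_skbmem() and skb_clone().
 */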
skb->fclone = SKB_FCLONE_ORIG;
atomic_set(&fclones->fclone_ref, 1);
- fclones->skb2.fclone = SKB_FCLONE_UNAVAILABLE;
+ fclones->skb2.fclone = SKB_FCLONE_FREE;
fclones->skb2.pfmemalloc = pfmemalloc;
}
out:
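/* Reference sketch, not part of the patch: the branch above only runs when
 * __alloc_skb() is called with the SKB_ALLOC_FCLONE flag, which callers
 * normally request through the alloc_skb_fclone() helper. A minimal,
 * hypothetical caller (example_alloc_tx_skb is not a kernel function):
 */
#include <linux/skbuff.h>

static struct sk_buff *example_alloc_tx_skb(unsigned int size, gfp_t gfp)
{
	/* Allocates from skbuff_fclone_cache, so skb1 starts out as
	 * SKB_FCLONE_ORIG with fclone_ref == 1 and skb2 waits as
	 * SKB_FCLONE_FREE for a later skb_clone().
	 */
	return alloc_skb_fclone(size, gfp);
}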
fclones = container_of(skb, struct sk_buff_fclones, skb2);
/* Warning : We must perform the atomic_dec_and_test() before
- * setting skb->fclone back to SKB_FCLONE_UNAVAILABLE, otherwise
+ * setting skb->fclone back to SKB_FCLONE_FREE, otherwise
* skb_clone() could set clone_ref to 2 before our decrement.
* Anyway, if we are going to free the structure, no need to
* rewrite skb->fclone.
*/
if (atomic_dec_and_test(&fclones->fclone_ref)) {
kmem_cache_free(skbuff_fclone_cache, fclones);
} else {
/* The clone portion is available for
* fast-cloning again.
*/
- skb->fclone = SKB_FCLONE_UNAVAILABLE;
+ skb->fclone = SKB_FCLONE_FREE;
}
break;
}
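/* Reference sketch, not part of the patch: the interleaving the Warning
 * comment in kfree_skbmem() above guards against. If the free path marked
 * skb2 SKB_FCLONE_FREE before dropping its reference, a concurrent
 * skb_clone() of skb1 (the hunk below) could see the FREE state and set
 * fclone_ref to 2; the pending atomic_dec_and_test() would then pull the
 * count back to 1 while both skbs are in use, so the next kfree would
 * release the whole sk_buff_fclones object under the surviving skb.
 * Decrementing first closes that window:
 *
 *   CPU0: kfree_skbmem(skb2)             CPU1: skb_clone(skb1)
 *   atomic_dec_and_test(&fclone_ref)  (2 -> 1)
 *   skb2->fclone = SKB_FCLONE_FREE
 *                                        sees skb2 == SKB_FCLONE_FREE
 *                                        atomic_set(&fclone_ref, 2)  (safe, ref was 1)
 */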
return NULL;
if (skb->fclone == SKB_FCLONE_ORIG &&
- n->fclone == SKB_FCLONE_UNAVAILABLE) {
+ n->fclone == SKB_FCLONE_FREE) {
n->fclone = SKB_FCLONE_CLONE;
/* As our fastclone was free, clone_ref must be 1 at this point.
* We could use atomic_inc() here, but it is faster