nlm: Ensure callback code also checks that the files match
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / include / linux / skbuff.h
CommitLineData
1da177e4
LT
1/*
2 * Definitions for the 'struct sk_buff' memory handlers.
3 *
4 * Authors:
5 * Alan Cox, <gw4pts@gw4pts.ampr.org>
6 * Florian La Roche, <rzsfl@rz.uni-sb.de>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#ifndef _LINUX_SKBUFF_H
15#define _LINUX_SKBUFF_H
16
1da177e4 17#include <linux/kernel.h>
fe55f6d5 18#include <linux/kmemcheck.h>
1da177e4
LT
19#include <linux/compiler.h>
20#include <linux/time.h>
187f1882 21#include <linux/bug.h>
1da177e4
LT
22#include <linux/cache.h>
23
60063497 24#include <linux/atomic.h>
1da177e4
LT
25#include <asm/types.h>
26#include <linux/spinlock.h>
1da177e4 27#include <linux/net.h>
3fc7e8a6 28#include <linux/textsearch.h>
1da177e4 29#include <net/checksum.h>
a80958f4 30#include <linux/rcupdate.h>
97fc2f08 31#include <linux/dmaengine.h>
b7aa0bf7 32#include <linux/hrtimer.h>
131ea667 33#include <linux/dma-mapping.h>
c8f44aff 34#include <linux/netdev_features.h>
5203cd28 35#include <net/flow_keys.h>
1da177e4 36
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

/* Round X up to the next multiple of the cacheline size. */
#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
/* Payload bytes usable out of an X-byte allocation once the
 * struct skb_shared_info that lives at skb->end is accounted for.
 */
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
/* Max payload for a page allocation of the given ORDER, after
 * subtracting X bytes of caller overhead and the shared info.
 */
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
1da177e4
LT
57/* A. Checksumming of received packets by device.
58 *
59 * NONE: device failed to checksum this packet.
60 * skb->csum is undefined.
61 *
 * UNNECESSARY: device parsed the packet and would have verified the checksum.
 * skb->csum is undefined.
 * It is a bad option, but, unfortunately, many vendors do this.
 * Apparently with the secret goal of selling you a new device, when you
 * will add a new protocol to your host. F.e. IPv6. 8)
67 *
84fa7933 68 * COMPLETE: the most generic way. Device supplied checksum of _all_
1da177e4
LT
69 * the packet as seen by netif_rx in skb->csum.
70 * NOTE: Even if device supports only some protocols, but
84fa7933 71 * is able to produce some skb->csum, it MUST use COMPLETE,
1da177e4
LT
72 * not UNNECESSARY.
73 *
c6c6e3e0
HX
74 * PARTIAL: identical to the case for output below. This may occur
75 * on a packet received directly from another Linux OS, e.g.,
76 * a virtualised Linux kernel on the same host. The packet can
77 * be treated in the same way as UNNECESSARY except that on
78 * output (i.e., forwarding) the checksum must be filled in
79 * by the OS or the hardware.
80 *
1da177e4
LT
81 * B. Checksumming on output.
82 *
83 * NONE: skb is checksummed by protocol or csum is not required.
84 *
84fa7933 85 * PARTIAL: device is required to csum packet as seen by hard_start_xmit
c6c6e3e0
HX
86 * from skb->csum_start to the end and to record the checksum
87 * at skb->csum_start + skb->csum_offset.
1da177e4
LT
88 *
89 * Device must show its capabilities in dev->features, set
90 * at device setup time.
91 * NETIF_F_HW_CSUM - it is clever device, it is able to checksum
92 * everything.
1da177e4
LT
93 * NETIF_F_IP_CSUM - device is dumb. It is able to csum only
 * TCP/UDP over IPv4. Sigh. Vendors like this
 * way for an unknown reason. Though, see comment above
96 * about CHECKSUM_UNNECESSARY. 8)
c6c6e3e0 97 * NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead.
1da177e4 98 *
3af79302
YZ
99 * UNNECESSARY: device will do per protocol specific csum. Protocol drivers
100 * that do not want net to perform the checksum calculation should use
101 * this flag in their outgoing skbs.
102 * NETIF_F_FCOE_CRC this indicates the device can do FCoE FC CRC
103 * offload. Correspondingly, the FCoE protocol driver
104 * stack should use CHECKSUM_UNNECESSARY.
105 *
1da177e4
LT
106 * Any questions? No questions, good. --ANK
107 */
108
1da177e4 109struct net_device;
716ea3a7 110struct scatterlist;
9c55e01c 111struct pipe_inode_info;
1da177e4 112
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/*
 * Reference-count handle for the connection an skb is associated
 * with (see sk_buff::nfct, "Associated connection, if any").
 */
struct nf_conntrack {
	atomic_t use;	/* reference count */
};
#endif
1da177e4
LT
118
#ifdef CONFIG_BRIDGE_NETFILTER
/*
 * Saved per-skb state about a bridged frame - see br_netfilter.c.
 */
struct nf_bridge_info {
	atomic_t		use;		/* reference count */
	unsigned int		mask;		/* state flags (see br_netfilter.c) */
	struct net_device	*physindev;	/* physical port the frame came in on */
	struct net_device	*physoutdev;	/* physical port the frame leaves by */
	unsigned long		data[32 / sizeof(unsigned long)];	/* scratch area */
};
#endif
128
1da177e4
LT
/*
 * Doubly-linked list head for a queue of sk_buffs, carrying its own
 * length counter and spinlock.
 */
struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;	/* number of buffers on the queue */
	spinlock_t	lock;	/* queue lock */
};
137
138struct sk_buff;
139
9d4dde52
IC
/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

/* Descriptor for one paged fragment of an skb's data. */
struct skb_frag_struct {
	struct {
		struct page *p;	/* page holding the fragment's bytes */
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	/* 32-bit offset/size: a single fragment may exceed 64K here. */
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;	/* byte offset of data within the page */
	__u16 size;		/* fragment length in bytes */
#endif
};
167
9e903e08
ED
/**
 * skb_frag_size - report the length in bytes of a paged fragment
 * @frag: fragment descriptor
 */
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}
172
/**
 * skb_frag_size_set - set the length of a paged fragment
 * @frag: fragment descriptor
 * @size: new length in bytes
 */
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}
177
/**
 * skb_frag_size_add - grow a paged fragment's length
 * @frag: fragment descriptor
 * @delta: bytes to add
 */
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}
182
/**
 * skb_frag_size_sub - shrink a paged fragment's length
 * @frag: fragment descriptor
 * @delta: bytes to subtract
 */
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}
187
ac45f602
PO
#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};
217
2244d07b
OH
/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,
};
242
/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);	/* release notification */
	void *ctx;		/* device context */
	unsigned long desc;	/* userspace buffer index */
};
256
1da177e4
LT
/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;	/* number of entries used in frags[] */
	__u8		tx_flags;	/* SKBTX_* flags, see enum above */
	unsigned short	gso_size;	/* segment payload size for GSO */
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;	/* SKB_GSO_* flags */
	struct sk_buff	*frag_list;	/* chain of additional skbs */
	struct skb_shared_hwtstamps hwtstamps;
	__be32          ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *		destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};
283
/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
/* Mask selecting the low (whole skb->data) half of dataref. */
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
297
d179cd12
DM
298
/* Values for sk_buff::fclone ("skbuff clone status"). */
enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fast-clone companion */
	SKB_FCLONE_ORIG,	/* original skb of a fast-clone pair */
	SKB_FCLONE_CLONE,	/* the clone half of a fast-clone pair */
};
304
7967168c
HX
/* Segmentation-offload type flags (see skb_shared_info::gso_type). */
enum {
	SKB_GSO_TCPV4 = 1 << 0,	/* TCP over IPv4 */
	SKB_GSO_UDP = 1 << 1,	/* UDP fragmentation offload */

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,	/* TCP over IPv6 */

	SKB_GSO_FCOE = 1 << 5,	/* Fibre Channel over Ethernet */

	SKB_GSO_GRE = 1 << 6,	/* GRE-encapsulated */

	SKB_GSO_UDP_TUNNEL = 1 << 7,	/* UDP-tunnel-encapsulated */
};
323
2e07fa9c
ACM
#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
/* Header positions in struct sk_buff are stored as offsets from
 * skb->head (presumably to keep the struct small on 64-bit — the
 * 32-bit branch below stores raw pointers instead).
 */
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
333
1da177e4
LT
/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
 *	@encapsulation: indicates the skb contains an encapsulated packet;
 *		the inner_* header fields are then meaningful
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@inner_mac_header: Link layer header (encapsulation)
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;

	__u32			rxhash;

	__be16			vlan_proto;
	__u16			vlan_tci;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			pfmemalloc:1;
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	__u8			head_frag:1;
	/* Encapsulation protocol and NIC drivers should use
	 * this flag to indicate to each other if the skb contains
	 * encapsulated packet or not and maybe use the inner packet
	 * headers if needed
	 */
	__u8			encapsulation:1;
	/* 7/9 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
		__u32		reserved_tailroom;
	};

	sk_buff_data_t		inner_transport_header;
	sk_buff_data_t		inner_network_header;
	sk_buff_data_t		inner_mac_header;
	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};
517
518#ifdef __KERNEL__
519/*
520 * Handling routines are only of interest to the kernel
521 */
522#include <linux/slab.h>
523
1da177e4 524
c93bdd0e
MG
525#define SKB_ALLOC_FCLONE 0x01
526#define SKB_ALLOC_RX 0x02
527
/**
 * skb_pfmemalloc - was the skb allocated from PFMEMALLOC reserves?
 * @skb: buffer to test
 *
 * Simply reads the pfmemalloc bit; the unlikely() branch hint is
 * deliberate and must be kept.
 */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}
533
7fee226a
ED
/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	/* Mask off the NOREF bit to recover the actual pointer. */
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}
557
7fee226a
ED
/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	/* Stored with SKB_DST_NOREF clear: the skb owns a reference. */
	skb->_skb_refdst = (unsigned long)dst;
}
570
932bc4d7
JA
/* Common implementation for the two noref setters below. */
extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
				bool force);

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, false);
}
588
/**
 * skb_dst_set_noref_force - sets skb dst, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * No reference is taken and no dst_release will be called. While for
 * cached dsts deferred reclaim is a basic feature, for entries that are
 * not cached it is caller's job to guarantee that last dst_release for
 * provided dst happens when nobody uses it, eg. after a RCU grace period.
 */
static inline void skb_dst_set_noref_force(struct sk_buff *skb,
					   struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, true);
}
7fee226a
ED
605
/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 *
 * True only when a dst is actually attached AND its noref bit is set.
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}
614
511c3f92
ED
/**
 * skb_rtable - return the skb's attached dst entry as a route
 * @skb: buffer
 *
 * Same contract as skb_dst(): the caller must hold a reference or be
 * inside an RCU read-side section (skb_dst() checks this).
 */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	return (struct rtable *)dst;
}
619