Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * Definitions for the Interfaces handler. | |
7 | * | |
8 | * Version: @(#)dev.h 1.0.10 08/12/93 | |
9 | * | |
02c30a84 | 10 | * Authors: Ross Biro |
1da177e4 LT |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Corey Minyard <wf-rch!minyard@relay.EU.net> | |
13 | * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> | |
14 | * Alan Cox, <Alan.Cox@linux.org> | |
15 | * Bjorn Ekwall. <bj0rn@blox.se> | |
16 | * Pekka Riikonen <priikone@poseidon.pspt.fi> | |
17 | * | |
18 | * This program is free software; you can redistribute it and/or | |
19 | * modify it under the terms of the GNU General Public License | |
20 | * as published by the Free Software Foundation; either version | |
21 | * 2 of the License, or (at your option) any later version. | |
22 | * | |
23 | * Moved to /usr/include/linux for NET3 | |
24 | */ | |
25 | #ifndef _LINUX_NETDEVICE_H | |
26 | #define _LINUX_NETDEVICE_H | |
27 | ||
28 | #include <linux/if.h> | |
29 | #include <linux/if_ether.h> | |
30 | #include <linux/if_packet.h> | |
31 | ||
32 | #ifdef __KERNEL__ | |
d7fe0f24 | 33 | #include <linux/timer.h> |
bea3348e | 34 | #include <linux/delay.h> |
1da177e4 LT |
35 | #include <asm/atomic.h> |
36 | #include <asm/cache.h> | |
37 | #include <asm/byteorder.h> | |
38 | ||
1da177e4 LT |
39 | #include <linux/device.h> |
40 | #include <linux/percpu.h> | |
db217334 | 41 | #include <linux/dmaengine.h> |
bea3348e | 42 | #include <linux/workqueue.h> |
1da177e4 | 43 | |
a050c33f DL |
44 | #include <net/net_namespace.h> |
45 | ||
1da177e4 LT |
46 | struct vlan_group; |
47 | struct ethtool_ops; | |
115c1d6e | 48 | struct netpoll_info; |
704232c2 JB |
49 | /* 802.11 specific */ |
50 | struct wireless_dev; | |
1da177e4 LT |
51 | /* source back-compat hooks */ |
52 | #define SET_ETHTOOL_OPS(netdev,ops) \ | |
53 | ( (netdev)->ethtool_ops = (ops) ) | |
54 | ||
55 | #define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev | |
56 | functions are available. */ | |
57 | #define HAVE_FREE_NETDEV /* free_netdev() */ | |
58 | #define HAVE_NETDEV_PRIV /* netdev_priv() */ | |
59 | ||
60 | #define NET_XMIT_SUCCESS 0 | |
61 | #define NET_XMIT_DROP 1 /* skb dropped */ | |
62 | #define NET_XMIT_CN 2 /* congestion notification */ | |
63 | #define NET_XMIT_POLICED 3 /* skb is shot by police */ | |
64 | #define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue; | |
65 | (TC use only - dev_queue_xmit | |
66 | returns this as NET_XMIT_SUCCESS) */ | |
67 | ||
68 | /* Backlog congestion levels */ | |
69 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ | |
70 | #define NET_RX_DROP 1 /* packet dropped */ | |
71 | #define NET_RX_CN_LOW 2 /* storm alert, just in case */ | |
72 | #define NET_RX_CN_MOD 3 /* Storm on its way! */ | |
73 | #define NET_RX_CN_HIGH 4 /* The storm is here */ | |
74 | #define NET_RX_BAD 5 /* packet dropped due to kernel error */ | |
75 | ||
b9df3cb8 GR |
76 | /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It |
77 | * indicates that the device will soon be dropping packets, or already drops | |
78 | * some packets of the same priority; prompting us to send less aggressively. */ | |
79 | #define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e)) | |
1da177e4 LT |
80 | #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) |
81 | ||
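As a brief illustration of the intended use of net_xmit_eval(): callers that only care about real loss can fold NET_XMIT_CN into success. A minimal sketch, assuming a hypothetical `stats` counter in scope:

```c
/* Sketch: NET_XMIT_CN does not mean the skb was dropped, so it is not
 * counted as an error; other non-zero codes are real failures. */
int rc = dev_queue_xmit(skb);
if (net_xmit_eval(rc))          /* 0 for NET_XMIT_SUCCESS and NET_XMIT_CN */
        stats->tx_errors++;     /* hypothetical counter */
```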
82 | #endif | |
83 | ||
84 | #define MAX_ADDR_LEN 32 /* Largest hardware address length */ | |
85 | ||
86 | /* Driver transmit return codes */ | |
87 | #define NETDEV_TX_OK 0 /* driver took care of packet */ | |
88 | #define NETDEV_TX_BUSY 1 /* driver tx path was busy */
89 | #define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ | |
90 | ||
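A minimal sketch of the contract behind these codes, using hypothetical ring helpers (`my_ring_full()`, `my_ring_post()`): a driver returns NETDEV_TX_OK once it has taken ownership of the skb, and NETDEV_TX_BUSY to make the core requeue it.

```c
/* Sketch: driver transmit return codes in a hard_start_xmit routine. */
static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (my_ring_full(dev))          /* hypothetical helper */
                return NETDEV_TX_BUSY;  /* core requeues the skb */
        my_ring_post(dev, skb);         /* hypothetical: skb now owned */
        return NETDEV_TX_OK;
}
```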
91 | /* | |
92 | * Compute the worst case header length according to the protocols | |
93 | * used. | |
94 | */ | |
95 | ||
8388e3da DM |
96 | #if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) |
97 | # if defined(CONFIG_MAC80211_MESH) | |
98 | # define LL_MAX_HEADER 128 | |
99 | # else | |
100 | # define LL_MAX_HEADER 96 | |
101 | # endif | |
102 | #elif defined(CONFIG_TR) | |
103 | # define LL_MAX_HEADER 48 | |
1da177e4 | 104 | #else |
8388e3da | 105 | # define LL_MAX_HEADER 32 |
1da177e4 LT |
106 | #endif |
107 | ||
e81c7359 DM |
108 | #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ |
109 | !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \ | |
110 | !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \ | |
111 | !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE) | |
1da177e4 LT |
112 | #define MAX_HEADER LL_MAX_HEADER |
113 | #else | |
114 | #define MAX_HEADER (LL_MAX_HEADER + 48) | |
115 | #endif | |
116 | ||
f25f4e44 PWJ |
117 | struct net_device_subqueue |
118 | { | |
119 | /* Control state for each queue. This struct may contain
120 | * per-queue locks in the future. | |
121 | */ | |
122 | unsigned long state; | |
123 | }; | |
124 | ||
1da177e4 LT |
125 | /* |
126 | * Network device statistics. Akin to the 2.0 ether stats but | |
127 | * with byte counters. | |
128 | */ | |
129 | ||
130 | struct net_device_stats | |
131 | { | |
132 | unsigned long rx_packets; /* total packets received */ | |
133 | unsigned long tx_packets; /* total packets transmitted */ | |
134 | unsigned long rx_bytes; /* total bytes received */ | |
135 | unsigned long tx_bytes; /* total bytes transmitted */ | |
136 | unsigned long rx_errors; /* bad packets received */ | |
137 | unsigned long tx_errors; /* packet transmit problems */ | |
138 | unsigned long rx_dropped; /* no space in linux buffers */ | |
139 | unsigned long tx_dropped; /* no space available in linux */ | |
140 | unsigned long multicast; /* multicast packets received */ | |
141 | unsigned long collisions; | |
142 | ||
143 | /* detailed rx_errors: */ | |
144 | unsigned long rx_length_errors; | |
145 | unsigned long rx_over_errors; /* receiver ring buff overflow */ | |
146 | unsigned long rx_crc_errors; /* recved pkt with crc error */ | |
147 | unsigned long rx_frame_errors; /* recv'd frame alignment error */ | |
148 | unsigned long rx_fifo_errors; /* recv'r fifo overrun */ | |
149 | unsigned long rx_missed_errors; /* receiver missed packet */ | |
150 | ||
151 | /* detailed tx_errors */ | |
152 | unsigned long tx_aborted_errors; | |
153 | unsigned long tx_carrier_errors; | |
154 | unsigned long tx_fifo_errors; | |
155 | unsigned long tx_heartbeat_errors; | |
156 | unsigned long tx_window_errors; | |
157 | ||
158 | /* for cslip etc */ | |
159 | unsigned long rx_compressed; | |
160 | unsigned long tx_compressed; | |
161 | }; | |
162 | ||
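Since commit c45d286e these counters can live in the stats structure embedded in struct net_device (see below), so a driver's get_stats hook can be as small as this sketch:

```c
/* Sketch: counters are bumped in the driver's rx/tx paths; get_stats
 * just hands the embedded structure back to the core. */
static struct net_device_stats *my_get_stats(struct net_device *dev)
{
        return &dev->stats;
}
/* at probe time: dev->get_stats = my_get_stats; */
```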
163 | ||
164 | /* Media selection options. */ | |
165 | enum { | |
166 | IF_PORT_UNKNOWN = 0, | |
167 | IF_PORT_10BASE2, | |
168 | IF_PORT_10BASET, | |
169 | IF_PORT_AUI, | |
170 | IF_PORT_100BASET, | |
171 | IF_PORT_100BASETX, | |
172 | IF_PORT_100BASEFX | |
173 | }; | |
174 | ||
175 | #ifdef __KERNEL__ | |
176 | ||
177 | #include <linux/cache.h> | |
178 | #include <linux/skbuff.h> | |
179 | ||
180 | struct neighbour; | |
181 | struct neigh_parms; | |
182 | struct sk_buff; | |
183 | ||
184 | struct netif_rx_stats | |
185 | { | |
186 | unsigned total; | |
187 | unsigned dropped; | |
188 | unsigned time_squeeze; | |
1da177e4 LT |
189 | unsigned cpu_collision; |
190 | }; | |
191 | ||
192 | DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat); | |
193 | ||
bf742482 PM |
194 | struct dev_addr_list |
195 | { | |
196 | struct dev_addr_list *next; | |
197 | u8 da_addr[MAX_ADDR_LEN]; | |
198 | u8 da_addrlen; | |
a0a400d7 | 199 | u8 da_synced; |
bf742482 PM |
200 | int da_users; |
201 | int da_gusers; | |
202 | }; | |
1da177e4 LT |
203 | |
204 | /* | |
205 | * We tag multicasts with these structures. | |
206 | */ | |
3fba5a8b PM |
207 | |
208 | #define dev_mc_list dev_addr_list | |
209 | #define dmi_addr da_addr | |
210 | #define dmi_addrlen da_addrlen | |
211 | #define dmi_users da_users | |
212 | #define dmi_gusers da_gusers | |
1da177e4 LT |
213 | |
214 | struct hh_cache | |
215 | { | |
216 | struct hh_cache *hh_next; /* Next entry */ | |
217 | atomic_t hh_refcnt; /* number of users */ | |
f0490980 ED |
218 | /* |
219 | * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
220 | * cache line on SMP. | |
221 | * They are mostly read, but hh_refcnt may be changed quite frequently, | |
222 | * incurring cache line ping pongs. | |
223 | */ | |
224 | __be16 hh_type ____cacheline_aligned_in_smp; | |
225 | /* protocol identifier, e.g. ETH_P_IP
1da177e4 LT |
226 | * NOTE: For VLANs, this will be the |
227 | * encapsulated type. --BLG
228 | */ | |
d5c42c0e | 229 | u16 hh_len; /* length of header */ |
1da177e4 | 230 | int (*hh_output)(struct sk_buff *skb); |
3644f0ce | 231 | seqlock_t hh_lock; |
1da177e4 LT |
232 | |
233 | /* cached hardware header; allow for machine alignment needs. */ | |
234 | #define HH_DATA_MOD 16 | |
235 | #define HH_DATA_OFF(__len) \ | |
5ba0eac6 | 236 | (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) |
1da177e4 LT |
237 | #define HH_DATA_ALIGN(__len) \ |
238 | (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) | |
239 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | |
240 | }; | |
241 | ||
242 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. | |
243 | * Alternative is: | |
244 | * dev->hard_header_len ? (dev->hard_header_len + | |
245 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 | |
246 | * | |
247 | * We could use other alignment values, but we must maintain the | |
248 | * relationship HH alignment <= LL alignment. | |
f5184d26 JB |
249 | * |
250 | * LL_ALLOCATED_SPACE also takes into account the tailroom the device | |
251 | * may need. | |
1da177e4 LT |
252 | */ |
253 | #define LL_RESERVED_SPACE(dev) \ | |
f5184d26 | 254 | ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
1da177e4 | 255 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ |
f5184d26 JB |
256 | ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
257 | #define LL_ALLOCATED_SPACE(dev) \ | |
258 | ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) | |
1da177e4 | 259 | |
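A minimal allocation sketch, assuming `dev` and a `payload_len` in scope: reserving LL_RESERVED_SPACE() up front leaves correctly aligned headroom for the link-layer header, while LL_ALLOCATED_SPACE() additionally covers needed_tailroom.

```c
/* Sketch: size an outgoing skb so the hardware header fits. */
struct sk_buff *skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + payload_len,
                                GFP_ATOMIC);
if (skb == NULL)
        return -ENOBUFS;
skb_reserve(skb, LL_RESERVED_SPACE(dev));       /* aligned headroom */
```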
3b04ddde SH |
260 | struct header_ops { |
261 | int (*create) (struct sk_buff *skb, struct net_device *dev, | |
262 | unsigned short type, const void *daddr, | |
263 | const void *saddr, unsigned len); | |
264 | int (*parse)(const struct sk_buff *skb, unsigned char *haddr); | |
265 | int (*rebuild)(struct sk_buff *skb); | |
266 | #define HAVE_HEADER_CACHE | |
267 | int (*cache)(const struct neighbour *neigh, struct hh_cache *hh); | |
268 | void (*cache_update)(struct hh_cache *hh, | |
269 | const struct net_device *dev, | |
270 | const unsigned char *haddr); | |
271 | }; | |
272 | ||
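A sketch of how a driver for a hypothetical link layer would wire this up; the two `my_header_*` functions are placeholders:

```c
static int my_header_create(struct sk_buff *skb, struct net_device *dev,
                            unsigned short type, const void *daddr,
                            const void *saddr, unsigned len);
static int my_header_parse(const struct sk_buff *skb, unsigned char *haddr);

static const struct header_ops my_header_ops = {
        .create = my_header_create,
        .parse  = my_header_parse,
};

/* in the device setup routine: dev->header_ops = &my_header_ops; */
```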
1da177e4 LT |
273 | /* These flag bits are private to the generic network queueing |
274 | * layer, they may not be explicitly referenced by any other | |
275 | * code. | |
276 | */ | |
277 | ||
278 | enum netdev_state_t | |
279 | { | |
280 | __LINK_STATE_XOFF=0, | |
281 | __LINK_STATE_START, | |
282 | __LINK_STATE_PRESENT, | |
283 | __LINK_STATE_SCHED, | |
284 | __LINK_STATE_NOCARRIER, | |
b00055aa SR |
285 | __LINK_STATE_LINKWATCH_PENDING, |
286 | __LINK_STATE_DORMANT, | |
48d83325 | 287 | __LINK_STATE_QDISC_RUNNING, |
1da177e4 LT |
288 | }; |
289 | ||
290 | ||
291 | /* | |
292 | * This structure holds at boot time configured netdevice settings. They | |
293 | * are then used in the device probing. | |
294 | */ | |
295 | struct netdev_boot_setup { | |
296 | char name[IFNAMSIZ]; | |
297 | struct ifmap map; | |
298 | }; | |
299 | #define NETDEV_BOOT_SETUP_MAX 8 | |
300 | ||
20380731 | 301 | extern int __init netdev_boot_setup(char *str); |
1da177e4 | 302 | |
bea3348e SH |
303 | /* |
304 | * Structure for NAPI scheduling similar to tasklet but with weighting | |
305 | */ | |
306 | struct napi_struct { | |
307 | /* The poll_list must only be managed by the entity which | |
308 | * changes the state of the NAPI_STATE_SCHED bit. This means | |
309 | * whoever atomically sets that bit can add this napi_struct | |
310 | * to the per-cpu poll_list, and whoever clears that bit | |
311 | * can remove from the list right before clearing the bit. | |
312 | */ | |
313 | struct list_head poll_list; | |
314 | ||
315 | unsigned long state; | |
316 | int weight; | |
317 | int (*poll)(struct napi_struct *, int); | |
318 | #ifdef CONFIG_NETPOLL | |
319 | spinlock_t poll_lock; | |
320 | int poll_owner; | |
321 | struct net_device *dev; | |
322 | struct list_head dev_list; | |
323 | #endif | |
324 | }; | |
325 | ||
326 | enum | |
327 | { | |
328 | NAPI_STATE_SCHED, /* Poll is scheduled */ | |
a0a46196 | 329 | NAPI_STATE_DISABLE, /* Disable pending */ |
bea3348e SH |
330 | }; |
331 | ||
b3c97528 | 332 | extern void __napi_schedule(struct napi_struct *n); |
bea3348e | 333 | |
a0a46196 DM |
334 | static inline int napi_disable_pending(struct napi_struct *n) |
335 | { | |
336 | return test_bit(NAPI_STATE_DISABLE, &n->state); | |
337 | } | |
338 | ||
bea3348e SH |
339 | /** |
340 | * napi_schedule_prep - check if napi can be scheduled | |
341 | * @n: napi context | |
342 | * | |
343 | * Test if NAPI routine is already running, and if not mark | |
344 | * it as running. This is used as a condition variable to
a0a46196 DM |
345 | * ensure that only one NAPI poll instance runs. We also make
346 | * sure there is no pending NAPI disable. | |
bea3348e SH |
347 | */ |
348 | static inline int napi_schedule_prep(struct napi_struct *n) | |
349 | { | |
a0a46196 DM |
350 | return !napi_disable_pending(n) && |
351 | !test_and_set_bit(NAPI_STATE_SCHED, &n->state); | |
bea3348e SH |
352 | } |
353 | ||
354 | /** | |
355 | * napi_schedule - schedule NAPI poll | |
356 | * @n: napi context | |
357 | * | |
358 | * Schedule NAPI poll routine to be called if it is not already | |
359 | * running. | |
360 | */ | |
361 | static inline void napi_schedule(struct napi_struct *n) | |
362 | { | |
363 | if (napi_schedule_prep(n)) | |
364 | __napi_schedule(n); | |
365 | } | |
366 | ||
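The canonical caller of napi_schedule() is a device interrupt handler. A sketch, with a hypothetical private struct and helper (`my_dev_ack_irqs()`):

```c
/* Sketch: mask device interrupts and defer the real work to poll(). */
static irqreturn_t my_interrupt(int irq, void *dev_id)
{
        struct my_priv *priv = dev_id;  /* hypothetical private struct */

        my_dev_ack_irqs(priv);          /* ack + mask in the hardware */
        napi_schedule(&priv->napi);
        return IRQ_HANDLED;
}
```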
bfe13f54 RD |
367 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ |
368 | static inline int napi_reschedule(struct napi_struct *napi) | |
369 | { | |
370 | if (napi_schedule_prep(napi)) { | |
371 | __napi_schedule(napi); | |
372 | return 1; | |
373 | } | |
374 | return 0; | |
375 | } | |
376 | ||
bea3348e SH |
377 | /** |
378 | * napi_complete - NAPI processing complete | |
379 | * @n: napi context | |
380 | * | |
381 | * Mark NAPI processing as complete. | |
382 | */ | |
383 | static inline void __napi_complete(struct napi_struct *n) | |
384 | { | |
385 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | |
386 | list_del(&n->poll_list); | |
387 | smp_mb__before_clear_bit(); | |
388 | clear_bit(NAPI_STATE_SCHED, &n->state); | |
389 | } | |
390 | ||
391 | static inline void napi_complete(struct napi_struct *n) | |
392 | { | |
50fd4407 DM |
393 | unsigned long flags; |
394 | ||
395 | local_irq_save(flags); | |
bea3348e | 396 | __napi_complete(n); |
50fd4407 | 397 | local_irq_restore(flags); |
bea3348e SH |
398 | } |
399 | ||
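napi_complete() is called from the poll() routine once it has done less work than its budget. A sketch, with hypothetical helpers `my_rx_clean()` and `my_dev_unmask_irqs()`:

```c
/* Sketch: the standard NAPI poll pattern. */
static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int work_done = my_rx_clean(priv, budget);

        if (work_done < budget) {       /* ring drained */
                napi_complete(napi);
                my_dev_unmask_irqs(priv); /* back to interrupt mode */
        }
        return work_done;
}
```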
400 | /** | |
401 | * napi_disable - prevent NAPI from scheduling | |
402 | * @n: napi context | |
403 | * | |
404 | * Stop NAPI from being scheduled on this context. | |
405 | * Waits till any outstanding processing completes. | |
406 | */ | |
407 | static inline void napi_disable(struct napi_struct *n) | |
408 | { | |
a0a46196 | 409 | set_bit(NAPI_STATE_DISABLE, &n->state); |
bea3348e | 410 | while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) |
43cc7380 | 411 | msleep(1); |
a0a46196 | 412 | clear_bit(NAPI_STATE_DISABLE, &n->state); |
bea3348e SH |
413 | } |
414 | ||
415 | /** | |
416 | * napi_enable - enable NAPI scheduling | |
417 | * @n: napi context | |
418 | * | |
419 | * Resume NAPI from being scheduled on this context. | |
420 | * Must be paired with napi_disable. | |
421 | */ | |
422 | static inline void napi_enable(struct napi_struct *n) | |
423 | { | |
424 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | |
425 | smp_mb__before_clear_bit(); | |
426 | clear_bit(NAPI_STATE_SCHED, &n->state); | |
427 | } | |
428 | ||
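The disable/enable pair is typically wrapped around a device reset. A minimal sketch, assuming a hypothetical `my_hw_reset()`:

```c
napi_disable(&priv->napi);      /* waits for a running poll to finish */
my_hw_reset(priv);              /* device quiescent here */
napi_enable(&priv->napi);       /* polls may be scheduled again */
```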
c264c3de SH |
429 | #ifdef CONFIG_SMP |
430 | /** | |
431 | * napi_synchronize - wait until NAPI is not running | |
432 | * @n: napi context | |
433 | * | |
434 | * Wait until NAPI is done being scheduled on this context. | |
435 | * Waits till any outstanding processing completes but | |
436 | * does not disable future activations. | |
437 | */ | |
438 | static inline void napi_synchronize(const struct napi_struct *n) | |
439 | { | |
440 | while (test_bit(NAPI_STATE_SCHED, &n->state)) | |
441 | msleep(1); | |
442 | } | |
443 | #else | |
444 | # define napi_synchronize(n) barrier() | |
445 | #endif | |
446 | ||
1da177e4 LT |
447 | /* |
448 | * The DEVICE structure. | |
449 | * Actually, this whole structure is a big mistake. It mixes I/O | |
450 | * data with strictly "high-level" data, and it has to know about | |
451 | * almost every data structure used in the INET module. | |
452 | * | |
453 | * FIXME: cleanup struct net_device such that network protocol info | |
454 | * moves out. | |
455 | */ | |
456 | ||
457 | struct net_device | |
458 | { | |
459 | ||
460 | /* | |
461 | * This is the first field of the "visible" part of this structure | |
462 | * (i.e. as seen by users in the "Space.c" file). It is the name | |
463 | * of the interface.
464 | */ | |
465 | char name[IFNAMSIZ]; | |
9356b8fc ED |
466 | /* device name hash chain */ |
467 | struct hlist_node name_hlist; | |
1da177e4 LT |
468 | |
469 | /* | |
470 | * I/O specific fields | |
471 | * FIXME: Merge these and struct ifmap into one | |
472 | */ | |
473 | unsigned long mem_end; /* shared mem end */ | |
474 | unsigned long mem_start; /* shared mem start */ | |
475 | unsigned long base_addr; /* device I/O address */ | |
476 | unsigned int irq; /* device IRQ number */ | |
477 | ||
478 | /* | |
479 | * Some hardware also needs these fields, but they are not | |
480 | * part of the usual set specified in Space.c. | |
481 | */ | |
482 | ||
483 | unsigned char if_port; /* Selectable AUI, TP,..*/ | |
484 | unsigned char dma; /* DMA channel */ | |
485 | ||
486 | unsigned long state; | |
487 | ||
7562f876 | 488 | struct list_head dev_list; |
bea3348e SH |
489 | #ifdef CONFIG_NETPOLL |
490 | struct list_head napi_list; | |
491 | #endif | |
1da177e4 LT |
492 | |
493 | /* The device initialization function. Called only once. */ | |
494 | int (*init)(struct net_device *dev); | |
495 | ||
496 | /* ------- Fields preinitialized in Space.c finish here ------- */ | |
497 | ||
9356b8fc ED |
498 | /* Net device features */ |
499 | unsigned long features; | |
500 | #define NETIF_F_SG 1 /* Scatter/gather IO. */ | |
d212f87b | 501 | #define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */ |
9356b8fc ED |
502 | #define NETIF_F_NO_CSUM 4 /* Does not require checksum. E.g. loopback. */
503 | #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ | |
d212f87b | 504 | #define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */ |
9356b8fc ED |
505 | #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ |
506 | #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ | |
507 | #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ | |
508 | #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ | |
509 | #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ | |
510 | #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ | |
37c3185a | 511 | #define NETIF_F_GSO 2048 /* Enable software GSO. */ |
e24eb521 CB |
512 | #define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */ |
513 | /* do not use LLTX in new drivers */ | |
ce286d32 | 514 | #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ |
f25f4e44 | 515 | #define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */ |
3ae7c0b2 | 516 | #define NETIF_F_LRO 32768 /* large receive offload */ |
7967168c HX |
517 | |
518 | /* Segmentation offload features */ | |
289c79a4 PM |
519 | #define NETIF_F_GSO_SHIFT 16 |
520 | #define NETIF_F_GSO_MASK 0xffff0000 | |
7967168c | 521 | #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) |
f83ef8c0 | 522 | #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) |
576a30eb | 523 | #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) |
f83ef8c0 HX |
524 | #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) |
525 | #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) | |
9356b8fc | 526 | |
78eb8877 HX |
527 | /* List of features with software fallbacks. */ |
528 | #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) | |
529 | ||
d212f87b | 530 | |
8648b305 | 531 | #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) |
d212f87b SH |
532 | #define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) |
533 | #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) | |
534 | #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) | |
8648b305 | 535 | |
1da177e4 LT |
536 | struct net_device *next_sched; |
537 | ||
538 | /* Interface index. Unique device identifier */ | |
539 | int ifindex; | |
540 | int iflink; | |
541 | ||
542 | ||
543 | struct net_device_stats* (*get_stats)(struct net_device *dev); | |
c45d286e | 544 | struct net_device_stats stats; |
1da177e4 | 545 | |
b86e0280 | 546 | #ifdef CONFIG_WIRELESS_EXT |
1da177e4 LT |
547 | /* List of functions to handle Wireless Extensions (instead of ioctl). |
548 | * See <net/iw_handler.h> for details. Jean II */ | |
549 | const struct iw_handler_def * wireless_handlers; | |
550 | /* Instance data managed by the core of Wireless Extensions. */ | |
551 | struct iw_public_data * wireless_data; | |
b86e0280 | 552 | #endif |
76fd8593 | 553 | const struct ethtool_ops *ethtool_ops; |
1da177e4 | 554 | |
3b04ddde SH |
555 | /* Hardware header description */ |
556 | const struct header_ops *header_ops; | |
557 | ||
1da177e4 LT |
558 | /* |
559 | * This marks the end of the "visible" part of the structure. All | |
560 | * fields hereafter are internal to the system, and may change at | |
561 | * will (read: may be cleaned up at will). | |
562 | */ | |
563 | ||
1da177e4 | 564 | |
b00055aa | 565 | unsigned int flags; /* interface flags (a la BSD) */ |
1da177e4 LT |
566 | unsigned short gflags; |
567 | unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ | |
568 | unsigned short padded; /* How much padding added by alloc_netdev() */ | |
569 | ||
b00055aa SR |
570 | unsigned char operstate; /* RFC2863 operstate */ |
571 | unsigned char link_mode; /* mapping policy to operstate */ | |
572 | ||
1da177e4 LT |
573 | unsigned mtu; /* interface MTU value */ |
574 | unsigned short type; /* interface hardware type */ | |
575 | unsigned short hard_header_len; /* hardware hdr length */ | |
1da177e4 | 576 | |
f5184d26 JB |
577 | /* extra head- and tailroom the hardware may need, but not in all cases |
578 | * can this be guaranteed, especially tailroom. Some cases also use | |
579 | * LL_MAX_HEADER instead to allocate the skb. | |
580 | */ | |
581 | unsigned short needed_headroom; | |
582 | unsigned short needed_tailroom; | |
583 | ||
1da177e4 LT |
584 | struct net_device *master; /* Pointer to master device of a group, |
585 | * which this device is member of. | |
586 | */ | |
587 | ||
588 | /* Interface address info. */ | |
a6f9a705 | 589 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ |
1da177e4 LT |
590 | unsigned char addr_len; /* hardware address length */ |
591 | unsigned short dev_id; /* for shared network cards */ | |
592 | ||
4417da66 PM |
593 | struct dev_addr_list *uc_list; /* Secondary unicast mac addresses */ |
594 | int uc_count; /* Number of installed ucasts */ | |
595 | int uc_promisc; | |
3fba5a8b | 596 | struct dev_addr_list *mc_list; /* Multicast mac addresses */ |
1da177e4 LT |
597 | int mc_count; /* Number of installed mcasts */ |
598 | int promiscuity; | |
599 | int allmulti; | |
600 | ||
1da177e4 LT |
601 | |
602 | /* Protocol specific pointers */ | |
603 | ||
604 | void *atalk_ptr; /* AppleTalk link */ | |
605 | void *ip_ptr; /* IPv4 specific data */ | |
606 | void *dn_ptr; /* DECnet specific data */ | |
607 | void *ip6_ptr; /* IPv6 specific data */ | |
608 | void *ec_ptr; /* Econet specific data */ | |
609 | void *ax25_ptr; /* AX.25 specific data */ | |
704232c2 JB |
610 | struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, |
611 | assign before registering */ | |
1da177e4 | 612 | |
9356b8fc ED |
613 | /* |
614 | * Cache line mostly used on receive path (including eth_type_trans()) | |
615 | */ | |
9356b8fc ED |
616 | unsigned long last_rx; /* Time of last Rx */ |
617 | /* Interface address info used in eth_type_trans() */ | |
618 | unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast | |
619 | because most packets are unicast) */ | |
620 | ||
621 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ | |
1da177e4 | 622 | |
d5bd0146 NT |
623 | /* ingress path synchronizer */ |
624 | spinlock_t ingress_lock; | |
625 | struct Qdisc *qdisc_ingress; | |
626 | ||
9356b8fc ED |
627 | /* |
628 | * Cache line mostly used on queue transmit path (qdisc) | |
629 | */ | |
630 | /* device queue lock */ | |
631 | spinlock_t queue_lock ____cacheline_aligned_in_smp; | |
1da177e4 LT |
632 | struct Qdisc *qdisc; |
633 | struct Qdisc *qdisc_sleeping; | |
1da177e4 LT |
634 | struct list_head qdisc_list; |
635 | unsigned long tx_queue_len; /* Max frames per queue allowed */ | |
636 | ||
f6a78bfc HX |
637 | /* Partially transmitted GSO packet. */ |
638 | struct sk_buff *gso_skb; | |
639 | ||
9356b8fc ED |
640 | /* |
641 | * One part is mostly used on xmit path (device) | |
642 | */ | |
1da177e4 | 643 | /* hard_start_xmit synchronizer */ |
932ff279 | 644 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; |
1da177e4 LT |
645 | /* cpu id of the processor that has entered hard_start_xmit,
646 | or -1 if nobody has.
647 | */ | |
648 | int xmit_lock_owner; | |
9356b8fc ED |
649 | void *priv; /* pointer to private data */ |
650 | int (*hard_start_xmit) (struct sk_buff *skb, | |
651 | struct net_device *dev); | |
652 | /* These may be needed for future network-power-down code. */ | |
653 | unsigned long trans_start; /* Time (in jiffies) of last Tx */ | |
654 | ||
655 | int watchdog_timeo; /* used by dev_watchdog() */ | |
656 | struct timer_list watchdog_timer; | |
657 | ||
658 | /* | |
659 | * refcnt is a very hot point, so align it on SMP | |
660 | */ | |
1da177e4 | 661 | /* Number of references to this device */ |
9356b8fc ED |
662 | atomic_t refcnt ____cacheline_aligned_in_smp; |
663 | ||
1da177e4 LT |
664 | /* delayed register/unregister */ |
665 | struct list_head todo_list; | |
1da177e4 LT |
666 | /* device index hash chain */ |
667 | struct hlist_node index_hlist; | |
668 | ||
572a103d HX |
669 | struct net_device *link_watch_next; |
670 | ||
1da177e4 LT |
671 | /* register/unregister state machine */ |
672 | enum { NETREG_UNINITIALIZED=0, | |
b17a7c17 | 673 | NETREG_REGISTERED, /* completed register_netdevice */ |
1da177e4 LT |
674 | NETREG_UNREGISTERING, /* called unregister_netdevice */ |
675 | NETREG_UNREGISTERED, /* completed unregister todo */ | |
676 | NETREG_RELEASED, /* called free_netdev */ | |
677 | } reg_state; | |
678 | ||
1da177e4 LT |
679 | /* Called after device is detached from network. */ |
680 | void (*uninit)(struct net_device *dev); | |
681 | /* Called after last user reference disappears. */ | |
682 | void (*destructor)(struct net_device *dev); | |
683 | ||
684 | /* Pointers to interface service routines. */ | |
685 | int (*open)(struct net_device *dev); | |
686 | int (*stop)(struct net_device *dev); | |
1da177e4 | 687 | #define HAVE_NETDEV_POLL |
24023451 PM |
688 | #define HAVE_CHANGE_RX_FLAGS |
689 | void (*change_rx_flags)(struct net_device *dev, | |
690 | int flags); | |
4417da66 PM |
691 | #define HAVE_SET_RX_MODE |
692 | void (*set_rx_mode)(struct net_device *dev); | |
1da177e4 LT |
693 | #define HAVE_MULTICAST |
694 | void (*set_multicast_list)(struct net_device *dev); | |
695 | #define HAVE_SET_MAC_ADDR | |
696 | int (*set_mac_address)(struct net_device *dev, | |
697 | void *addr); | |
bada339b JG |
698 | #define HAVE_VALIDATE_ADDR |
699 | int (*validate_addr)(struct net_device *dev); | |
1da177e4 LT |
700 | #define HAVE_PRIVATE_IOCTL |
701 | int (*do_ioctl)(struct net_device *dev, | |
702 | struct ifreq *ifr, int cmd); | |
703 | #define HAVE_SET_CONFIG | |
704 | int (*set_config)(struct net_device *dev, | |
705 | struct ifmap *map); | |
1da177e4 LT |
706 | #define HAVE_CHANGE_MTU |
707 | int (*change_mtu)(struct net_device *dev, int new_mtu); | |
708 | ||
709 | #define HAVE_TX_TIMEOUT | |
710 | void (*tx_timeout) (struct net_device *dev); | |
711 | ||
712 | void (*vlan_rx_register)(struct net_device *dev, | |
713 | struct vlan_group *grp); | |
714 | void (*vlan_rx_add_vid)(struct net_device *dev, | |
715 | unsigned short vid); | |
716 | void (*vlan_rx_kill_vid)(struct net_device *dev, | |
717 | unsigned short vid); | |
718 | ||
1da177e4 LT |
719 | int (*neigh_setup)(struct net_device *dev, struct neigh_parms *); |
720 | #ifdef CONFIG_NETPOLL | |
115c1d6e | 721 | struct netpoll_info *npinfo; |
1da177e4 LT |
722 | #endif |
723 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
724 | void (*poll_controller)(struct net_device *dev); | |
725 | #endif | |
726 | ||
c346dca1 | 727 | #ifdef CONFIG_NET_NS |
4a1c5371 EB |
728 | /* Network namespace this network device is inside */ |
729 | struct net *nd_net; | |
c346dca1 | 730 | #endif |
4a1c5371 | 731 | |
4951704b DM |
732 | /* mid-layer private */ |
733 | void *ml_priv; | |
734 | ||
1da177e4 LT |
735 | /* bridge stuff */ |
736 | struct net_bridge_port *br_port; | |
b863ceb7 PM |
737 | /* macvlan */ |
738 | struct macvlan_port *macvlan_port; | |
1da177e4 | 739 | |
1da177e4 | 740 | /* class/net/name entry */ |
43cb76d9 | 741 | struct device dev; |
fe9925b5 SH |
742 | /* space for optional statistics and wireless sysfs groups */ |
743 | struct attribute_group *sysfs_groups[3]; | |
38f7b870 PM |
744 | |
745 | /* rtnetlink link ops */ | |
746 | const struct rtnl_link_ops *rtnl_link_ops; | |
f25f4e44 | 747 | |
289c79a4 PM |
748 | /* VLAN feature mask */ |
749 | unsigned long vlan_features; | |
750 | ||
82cc1a7a PWJ |
751 | /* for setting kernel sock attribute on TCP connection setup */ |
752 | #define GSO_MAX_SIZE 65536 | |
753 | unsigned int gso_max_size; | |
754 | ||
f25f4e44 PWJ |
755 | /* The TX queue control structures */ |
756 | unsigned int egress_subqueue_count; | |
31ce72a6 | 757 | struct net_device_subqueue egress_subqueue[1]; |
1da177e4 | 758 | }; |
43cb76d9 | 759 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
1da177e4 LT |
760 | |
761 | #define NETDEV_ALIGN 32 | |
762 | #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) | |
763 | ||
c346dca1 YH |
764 | /* |
765 | * Net namespace inlines | |
766 | */ | |
767 | static inline | |
768 | struct net *dev_net(const struct net_device *dev) | |
769 | { | |
770 | #ifdef CONFIG_NET_NS | |
771 | return dev->nd_net; | |
772 | #else | |
773 | return &init_net; | |
774 | #endif | |
775 | } | |
776 | ||
777 | static inline | |
f5aa23fd | 778 | void dev_net_set(struct net_device *dev, struct net *net) |
c346dca1 YH |
779 | { |
780 | #ifdef CONFIG_NET_NS | |
f3005d7f DL |
781 | release_net(dev->nd_net); |
782 | dev->nd_net = hold_net(net); | |
c346dca1 YH |
783 | #endif |
784 | } | |
785 | ||
bea3348e SH |
786 | /** |
787 | * netdev_priv - access network device private data | |
788 | * @dev: network device | |
789 | * | |
790 | * Get network device private data | |
791 | */ | |
6472ce60 | 792 | static inline void *netdev_priv(const struct net_device *dev) |
1da177e4 | 793 | { |
f25f4e44 | 794 | return dev->priv; |
1da177e4 LT |
795 | } |
796 | ||
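The private area is co-allocated directly behind struct net_device by alloc_netdev() (declared further down in this header), so netdev_priv() is plain pointer arithmetic, not a second allocation. A sketch with a hypothetical private struct:

```c
struct my_priv {
        struct napi_struct napi;
        /* ... driver state ... */
};

struct net_device *dev = alloc_netdev(sizeof(struct my_priv),
                                      "myeth%d", ether_setup);
struct my_priv *priv = netdev_priv(dev);        /* no extra kmalloc() */
```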
1da177e4 LT |
797 | /* Set the sysfs physical device reference for the network logical device.
798 | * If set prior to registration, a sysfs symlink is created during initialization.
799 | */ | |
43cb76d9 | 800 | #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) |
1da177e4 | 801 | |
3b582cc1 SH |
802 | /** |
803 | * netif_napi_add - initialize a napi context | |
804 | * @dev: network device | |
805 | * @napi: napi context | |
806 | * @poll: polling function | |
807 | * @weight: default weight | |
808 | * | |
809 | * netif_napi_add() must be used to initialize a napi context prior to calling | |
810 | * *any* of the other napi related functions. | |
811 | */ | |
bea3348e SH |
812 | static inline void netif_napi_add(struct net_device *dev, |
813 | struct napi_struct *napi, | |
814 | int (*poll)(struct napi_struct *, int), | |
815 | int weight) | |
816 | { | |
817 | INIT_LIST_HEAD(&napi->poll_list); | |
818 | napi->poll = poll; | |
819 | napi->weight = weight; | |
820 | #ifdef CONFIG_NETPOLL | |
821 | napi->dev = dev; | |
822 | list_add(&napi->dev_list, &dev->napi_list); | |
823 | spin_lock_init(&napi->poll_lock); | |
824 | napi->poll_owner = -1; | |
825 | #endif | |
826 | set_bit(NAPI_STATE_SCHED, &napi->state); | |
827 | } | |
828 | ||
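netif_napi_add() is typically called once at probe time, after the napi_struct has been embedded in the driver's private data; a weight of 64 is the conventional default. A one-line sketch (my_poll as in the poll example earlier); note the context starts off in the SCHED state (see the set_bit above) and is released with napi_enable() at open time:

```c
netif_napi_add(dev, &priv->napi, my_poll, 64);
```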
1da177e4 | 829 | struct packet_type { |
f2ccd8fa DM |
830 | __be16 type; /* This is really htons(ether_type). */ |
831 | struct net_device *dev; /* NULL is wildcarded here */ | |
832 | int (*func) (struct sk_buff *, | |
833 | struct net_device *, | |
834 | struct packet_type *, | |
835 | struct net_device *); | |
576a30eb HX |
836 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
837 | int features); | |
a430a43d | 838 | int (*gso_send_check)(struct sk_buff *skb); |
1da177e4 LT |
839 | void *af_packet_priv; |
840 | struct list_head list; | |
841 | }; | |
842 | ||
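A sketch of registering a protocol handler; `my_rcv` is a placeholder, and the handler is removed again with dev_remove_pack() (declared below):

```c
static int my_rcv(struct sk_buff *skb, struct net_device *dev,
                  struct packet_type *pt, struct net_device *orig_dev);

static struct packet_type my_packet_type = {
        .type   = __constant_htons(ETH_P_IP),
        .func   = my_rcv,
};

dev_add_pack(&my_packet_type);
```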
843 | #include <linux/interrupt.h> | |
844 | #include <linux/notifier.h> | |
845 | ||
1da177e4 LT |
846 | extern rwlock_t dev_base_lock; /* Device list lock */ |
847 | ||
7562f876 | 848 | |
881d966b EB |
849 | #define for_each_netdev(net, d) \ |
850 | list_for_each_entry(d, &(net)->dev_base_head, dev_list) | |
851 | #define for_each_netdev_safe(net, d, n) \ | |
852 | list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) | |
853 | #define for_each_netdev_continue(net, d) \ | |
854 | list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) | |
855 | #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) | |
7562f876 | 856 | |
a050c33f DL |
857 | static inline struct net_device *next_net_device(struct net_device *dev) |
858 | { | |
859 | struct list_head *lh; | |
860 | struct net *net; | |
861 | ||
c346dca1 | 862 | net = dev_net(dev); |
a050c33f DL |
863 | lh = dev->dev_list.next; |
864 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
865 | } | |
866 | ||
867 | static inline struct net_device *first_net_device(struct net *net) | |
868 | { | |
869 | return list_empty(&net->dev_base_head) ? NULL : | |
870 | net_device_entry(net->dev_base_head.next); | |
871 | } | |
7562f876 | 872 | |
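A sketch of walking a namespace's device list; readers take dev_base_lock (declared above):

```c
struct net_device *dev;

read_lock(&dev_base_lock);
for_each_netdev(&init_net, dev)
        printk(KERN_DEBUG "found %s\n", dev->name);
read_unlock(&dev_base_lock);
```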
1da177e4 LT |
873 | extern int netdev_boot_setup_check(struct net_device *dev); |
874 | extern unsigned long netdev_boot_base(const char *prefix, int unit); | |
881d966b EB |
875 | extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr); |
876 | extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); | |
877 | extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); | |
1da177e4 LT |
878 | extern void dev_add_pack(struct packet_type *pt); |
879 | extern void dev_remove_pack(struct packet_type *pt); | |
880 | extern void __dev_remove_pack(struct packet_type *pt); | |
881 | ||
881d966b | 882 | extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags, |
1da177e4 | 883 | unsigned short mask); |
881d966b EB |
884 | extern struct net_device *dev_get_by_name(struct net *net, const char *name); |
885 | extern struct net_device *__dev_get_by_name(struct net *net, const char *name); | |
1da177e4 LT |
886 | extern int dev_alloc_name(struct net_device *dev, const char *name); |
887 | extern int dev_open(struct net_device *dev); | |
888 | extern int dev_close(struct net_device *dev); | |
889 | extern int dev_queue_xmit(struct sk_buff *skb); | |
890 | extern int register_netdevice(struct net_device *dev); | |
22f8cde5 | 891 | extern void unregister_netdevice(struct net_device *dev); |
1da177e4 LT |
892 | extern void free_netdev(struct net_device *dev); |
893 | extern void synchronize_net(void); | |
894 | extern int register_netdevice_notifier(struct notifier_block *nb); | |
895 | extern int unregister_netdevice_notifier(struct notifier_block *nb); | |
ad7379d4 | 896 | extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); |
881d966b EB |
897 | extern struct net_device *dev_get_by_index(struct net *net, int ifindex); |
898 | extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); | |
1da177e4 LT |
899 | extern int dev_restart(struct net_device *dev); |
900 | #ifdef CONFIG_NETPOLL_TRAP | |
901 | extern int netpoll_trap(void); | |
902 | #endif | |
903 | ||
0c4e8581 SH |
904 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
905 | unsigned short type, | |
3b04ddde SH |
906 | const void *daddr, const void *saddr, |
907 | unsigned len) | |
0c4e8581 | 908 | { |
f1ecfd5d | 909 | if (!dev->header_ops || !dev->header_ops->create) |
0c4e8581 | 910 | return 0; |
3b04ddde SH |
911 | |
912 | return dev->header_ops->create(skb, dev, type, daddr, saddr, len); | |
0c4e8581 SH |
913 | } |
914 | ||
b95cce35 SH |
915 | static inline int dev_parse_header(const struct sk_buff *skb, |
916 | unsigned char *haddr) | |
917 | { | |
918 | const struct net_device *dev = skb->dev; | |
919 | ||
1b83336b | 920 | if (!dev->header_ops || !dev->header_ops->parse) |
b95cce35 | 921 | return 0; |
3b04ddde | 922 | return dev->header_ops->parse(skb, haddr); |
b95cce35 SH |
923 | } |
924 | ||
1da177e4 LT |
925 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); |
926 | extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); | |
927 | static inline int unregister_gifconf(unsigned int family) | |
928 | { | |
929 | return register_gifconf(family, NULL); | |
930 | } | |
931 | ||
932 | /* | |
933 | * Incoming packets are placed on per-cpu queues so that | |
934 | * no locking is needed. | |
935 | */ | |
1da177e4 LT |
936 | struct softnet_data |
937 | { | |
31aa02c5 | 938 | struct net_device *output_queue; |
1da177e4 LT |
939 | struct sk_buff_head input_pkt_queue; |
940 | struct list_head poll_list; | |
1da177e4 LT |
941 | struct sk_buff *completion_queue; |
942 | ||
bea3348e | 943 | struct napi_struct backlog; |
db217334 CL |
944 | #ifdef CONFIG_NET_DMA |
945 | struct dma_chan *net_dma; | |
946 | #endif | |
1da177e4 LT |
947 | }; |
948 | ||
949 | DECLARE_PER_CPU(struct softnet_data,softnet_data); | |
950 | ||
951 | #define HAVE_NETIF_QUEUE | |
952 | ||
56079431 | 953 | extern void __netif_schedule(struct net_device *dev); |
1da177e4 LT |
954 | |
955 | static inline void netif_schedule(struct net_device *dev) | |
956 | { | |
957 | if (!test_bit(__LINK_STATE_XOFF, &dev->state)) | |
958 | __netif_schedule(dev); | |
959 | } | |
960 | ||
bea3348e SH |
961 | /** |
962 | * netif_start_queue - allow transmit | |
963 | * @dev: network device | |
964 | * | |
965 | * Allow upper layers to call the device hard_start_xmit routine. | |
966 | */ | |
1da177e4 LT |
967 | static inline void netif_start_queue(struct net_device *dev) |
968 | { | |
969 | clear_bit(__LINK_STATE_XOFF, &dev->state); | |
970 | } | |
971 | ||
bea3348e SH |
972 | /** |
973 | * netif_wake_queue - restart transmit | |
974 | * @dev: network device | |
975 | * | |
976 | * Allow upper layers to call the device hard_start_xmit routine. | |
977 | * Used for flow control when transmit resources are available. | |
978 | */ | |
1da177e4 LT |
979 | static inline void netif_wake_queue(struct net_device *dev) |
980 | { | |
981 | #ifdef CONFIG_NETPOLL_TRAP | |
5f286e11 SS |
982 | if (netpoll_trap()) { |
983 | clear_bit(__LINK_STATE_XOFF, &dev->state); | |
1da177e4 | 984 | return; |
5f286e11 | 985 | } |
1da177e4 LT |
986 | #endif |
987 | if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state)) | |
988 | __netif_schedule(dev); | |
989 | } | |
990 | ||
bea3348e SH |
991 | /** |
992 | * netif_stop_queue - stop the transmit queue
993 | * @dev: network device | |
994 | * | |
995 | * Stop upper layers calling the device hard_start_xmit routine. | |
996 | * Used for flow control when transmit resources are unavailable. | |
997 | */ | |
1da177e4 LT |
998 | static inline void netif_stop_queue(struct net_device *dev) |
999 | { | |
1da177e4 LT |
1000 | set_bit(__LINK_STATE_XOFF, &dev->state); |
1001 | } | |
1002 | ||
bea3348e SH |
1003 | /** |
1004 | * netif_queue_stopped - test if transmit queue is flow blocked
1005 | * @dev: network device | |
1006 | * | |
1007 | * Test if transmit queue on device is currently unable to send. | |
1008 | */ | |
1da177e4 LT |
1009 | static inline int netif_queue_stopped(const struct net_device *dev) |
1010 | { | |
1011 | return test_bit(__LINK_STATE_XOFF, &dev->state); | |
1012 | } | |
1013 | ||
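Together these helpers implement driver-side flow control. A sketch of the usual pattern, with hypothetical ring helpers: stop the queue before the ring overflows, and let the TX-completion interrupt call netif_wake_queue() once space is freed.

```c
/* Sketch: stop the queue when the ring fills; tx completion wakes it. */
static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *priv = netdev_priv(dev);

        my_ring_post(priv, skb);        /* hypothetical */
        dev->trans_start = jiffies;
        if (my_ring_full(priv))         /* hypothetical */
                netif_stop_queue(dev);
        return NETDEV_TX_OK;
}
```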
bea3348e SH |
1014 | /** |
1015 | * netif_running - test if up | |
1016 | * @dev: network device | |
1017 | * | |
1018 | * Test if the device has been brought up. | |
1019 | */ | |
1da177e4 LT |
1020 | static inline int netif_running(const struct net_device *dev) |
1021 | { | |
1022 | return test_bit(__LINK_STATE_START, &dev->state); | |
1023 | } | |
1024 | ||
f25f4e44 PWJ |
1025 | /* |
1026 | * Routines to manage the subqueues on a device. We only need start,
1027 | * stop, and a check if it's stopped. All other device management is
1028 | * done at the overall netdevice level.
1029 | * There is also a test for whether the device is multiqueue.
1030 | */ | |
bea3348e SH |
1031 | |
1032 | /** | |
1033 | * netif_start_subqueue - allow sending packets on subqueue | |
1034 | * @dev: network device | |
1035 | * @queue_index: sub queue index | |
1036 | * | |
1037 | * Start individual transmit queue of a device with multiple transmit queues. | |
1038 | */ | |
f25f4e44 PWJ |
1039 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) |
1040 | { | |
1041 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | |
1042 | clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state); | |
1043 | #endif | |
1044 | } | |
1045 | ||
bea3348e SH |
1046 | /** |
1047 | * netif_stop_subqueue - stop sending packets on subqueue | |
1048 | * @dev: network device | |
1049 | * @queue_index: sub queue index | |
1050 | * | |
1051 | * Stop individual transmit queue of a device with multiple transmit queues. | |
1052 | */ | |
f25f4e44 PWJ |
1053 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) |
1054 | { | |
1055 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | |
1056 | #ifdef CONFIG_NETPOLL_TRAP | |
1057 | if (netpoll_trap()) | |
1058 | return; | |
1059 | #endif | |
1060 | set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state); | |
1061 | #endif | |
1062 | } | |
1063 | ||
bea3348e SH |
1064 | /** |
1065 | * netif_subqueue_stopped - test status of subqueue | |
1066 | * @dev: network device | |
1067 | * @queue_index: sub queue index | |
1068 | * | |
1069 | * Check individual transmit queue of a device with multiple transmit queues. | |
1070 | */ | |
668f895a | 1071 | static inline int __netif_subqueue_stopped(const struct net_device *dev, |
f25f4e44 PWJ |
1072 | u16 queue_index) |
1073 | { | |
1074 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | |
1075 | return test_bit(__LINK_STATE_XOFF, | |
1076 | &dev->egress_subqueue[queue_index].state); | |
1077 | #else | |
1078 | return 0; | |
1079 | #endif | |
1080 | } | |
1081 | ||
668f895a PE |
1082 | static inline int netif_subqueue_stopped(const struct net_device *dev, |
1083 | struct sk_buff *skb) | |
1084 | { | |
1085 | return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); | |
1086 | } | |
bea3348e SH |
1087 | |
1088 | /** | |
1089 | * netif_wake_subqueue - allow sending packets on subqueue | |
1090 | * @dev: network device | |
1091 | * @queue_index: sub queue index | |
1092 | * | |
1093 | * Resume individual transmit queue of a device with multiple transmit queues. | |
1094 | */ | |
f25f4e44 PWJ |
1095 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) |
1096 | { | |
1097 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | |
1098 | #ifdef CONFIG_NETPOLL_TRAP | |
1099 | if (netpoll_trap()) | |
1100 | return; | |
1101 | #endif | |
1102 | if (test_and_clear_bit(__LINK_STATE_XOFF, | |
1103 | &dev->egress_subqueue[queue_index].state)) | |
1104 | __netif_schedule(dev); | |
1105 | #endif | |
1106 | } | |
1107 | ||
bea3348e SH |
1108 | /** |
1109 | * netif_is_multiqueue - test if device has multiple transmit queues | |
1110 | * @dev: network device | |
1111 | * | |
1112 | * Check if device has multiple transmit queues.
1113 | * Always false if CONFIG_NETDEVICES_MULTIQUEUE is not configured.
1114 | */ | |
f25f4e44 PWJ |
1115 | static inline int netif_is_multiqueue(const struct net_device *dev) |
1116 | { | |
1117 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | |
1118 | return (!!(NETIF_F_MULTI_QUEUE & dev->features)); | |
1119 | #else | |
1120 | return 0; | |
1121 | #endif | |
1122 | } | |
1da177e4 LT |
1123 | |
1124 | /* Use this variant when it is known for sure that it | |
0ef47309 ML |
1125 | * is executing from hardware interrupt context or with hardware interrupts |
1126 | * disabled. | |
1da177e4 | 1127 | */ |
bea3348e | 1128 | extern void dev_kfree_skb_irq(struct sk_buff *skb); |
1da177e4 LT |
1129 | |
1130 | /* Use this variant in places where it could be invoked | |
0ef47309 ML |
1131 | * from either hardware interrupt or other context, with hardware interrupts |
1132 | * either disabled or enabled. | |
1da177e4 | 1133 | */ |
56079431 | 1134 | extern void dev_kfree_skb_any(struct sk_buff *skb); |
1da177e4 LT |
1135 | |
1136 | #define HAVE_NETIF_RX 1 | |
1137 | extern int netif_rx(struct sk_buff *skb); | |
1138 | extern int netif_rx_ni(struct sk_buff *skb); | |
1139 | #define HAVE_NETIF_RECEIVE_SKB 1 | |
1140 | extern int netif_receive_skb(struct sk_buff *skb); | |
c2373ee9 | 1141 | extern int dev_valid_name(const char *name); |
881d966b EB |
1142 | extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); |
1143 | extern int dev_ethtool(struct net *net, struct ifreq *); | |
1da177e4 LT |
1144 | extern unsigned dev_get_flags(const struct net_device *); |
1145 | extern int dev_change_flags(struct net_device *, unsigned); | |
1146 | extern int dev_change_name(struct net_device *, char *); | |
ce286d32 EB |
1147 | extern int dev_change_net_namespace(struct net_device *, |
1148 | struct net *, const char *); | |
1da177e4 LT |
1149 | extern int dev_set_mtu(struct net_device *, int); |
1150 | extern int dev_set_mac_address(struct net_device *, | |
1151 | struct sockaddr *); | |
f6a78bfc HX |
1152 | extern int dev_hard_start_xmit(struct sk_buff *skb, |
1153 | struct net_device *dev); | |
1da177e4 | 1154 | |
20380731 | 1155 | extern int netdev_budget; |
1da177e4 LT |
1156 | |
1157 | /* Called by rtnetlink.c:rtnl_unlock() */ | |
1158 | extern void netdev_run_todo(void); | |
1159 | ||
bea3348e SH |
1160 | /** |
1161 | * dev_put - release reference to device | |
1162 | * @dev: network device | |
1163 | * | |
9ef4429b | 1164 | * Release reference to device to allow it to be freed. |
bea3348e | 1165 | */ |
1da177e4 LT |
1166 | static inline void dev_put(struct net_device *dev) |
1167 | { | |
1168 | atomic_dec(&dev->refcnt); | |
1169 | } | |
1170 | ||
bea3348e SH |
1171 | /** |
1172 | * dev_hold - get reference to device | |
1173 | * @dev: network device | |
1174 | * | |
9ef4429b | 1175 | * Hold reference to device to keep it from being freed. |
bea3348e | 1176 | */ |
15333061 SH |
1177 | static inline void dev_hold(struct net_device *dev) |
1178 | { | |
1179 | atomic_inc(&dev->refcnt); | |
1180 | } | |
1da177e4 LT |
1181 | |
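A sketch of the reference discipline: lookups such as dev_get_by_name() (declared above) return with the refcount already raised, and the caller drops it with dev_put() when done:

```c
struct net_device *dev = dev_get_by_name(&init_net, "eth0");
if (dev) {
        /* dev cannot be freed while we hold the reference */
        dev_put(dev);
}
```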
1182 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | |
1183 | * and _off may be called from IRQ context, but it is the caller
1184 | * who is responsible for serialization of these calls. | |
b00055aa SR |
1185 | * |
1186 | * The name carrier is inappropriate, these functions should really be | |
1187 | * called netif_lowerlayer_*() because they represent the state of any | |
1188 | * kind of lower layer not just hardware media. | |
1da177e4 LT |
1189 | */ |
1190 | ||
1191 | extern void linkwatch_fire_event(struct net_device *dev); | |
1192 | ||
bea3348e SH |
1193 | /** |
1194 | * netif_carrier_ok - test if carrier present | |
1195 | * @dev: network device | |
1196 | * | |
1197 | * Check if carrier is present on device | |
1198 | */ | |
1da177e4 LT |
1199 | static inline int netif_carrier_ok(const struct net_device *dev) |
1200 | { | |
1201 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | |
1202 | } | |
1203 | ||
1204 | extern void __netdev_watchdog_up(struct net_device *dev); | |
1205 | ||
0a242efc | 1206 | extern void netif_carrier_on(struct net_device *dev); |
1da177e4 | 1207 | |
0a242efc | 1208 | extern void netif_carrier_off(struct net_device *dev); |
1da177e4 | 1209 | |
bea3348e SH |
1210 | /** |
1211 | * netif_dormant_on - mark device as dormant. | |
1212 | * @dev: network device | |
1213 | * | |
1214 | * Mark device as dormant (as per RFC2863). | |
1215 | * | |
1216 | * The dormant state indicates that the relevant interface is not | |
1217 | * actually in a condition to pass packets (i.e., it is not 'up') but is | |
1218 | * in a "pending" state, waiting for some external event. For "on- | |
1219 | * demand" interfaces, this new state identifies the situation where the | |
1220 | * interface is waiting for events to place it in the up state. | |
1221 | * | |
1222 | */ | |
b00055aa SR |
1223 | static inline void netif_dormant_on(struct net_device *dev) |
1224 | { | |
1225 | if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) | |
1226 | linkwatch_fire_event(dev); | |
1227 | } | |
1228 | ||
bea3348e SH |
1229 | /** |
1230 | * netif_dormant_off - set device as not dormant. | |
1231 | * @dev: network device | |
1232 | * | |
1233 | * Device is not in dormant state. | |
1234 | */ | |
b00055aa SR |
1235 | static inline void netif_dormant_off(struct net_device *dev) |
1236 | { | |
1237 | if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) | |
1238 | linkwatch_fire_event(dev); | |
1239 | } | |
1240 | ||
bea3348e SH |
1241 | /** |
1242 | * netif_dormant - test if device is dormant
1243 | * @dev: network device
1244 | *
1245 | * Check if the device is dormant.
1246 | */ | |
b00055aa SR |
1247 | static inline int netif_dormant(const struct net_device *dev) |
1248 | { | |
1249 | return test_bit(__LINK_STATE_DORMANT, &dev->state); | |
1250 | } | |
1251 | ||
1252 | ||
bea3348e SH |
1253 | /** |
1254 | * netif_oper_up - test if device is operational | |
1255 | * @dev: network device | |
1256 | * | |
1257 | * Check if the device is operationally up (RFC2863).
1258 | */ | |
b00055aa SR |
1259 | static inline int netif_oper_up(const struct net_device *dev) { |
1260 | return (dev->operstate == IF_OPER_UP || | |
1261 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); | |
1262 | } | |
1263 | ||
bea3348e SH |
1264 | /** |
1265 | * netif_device_present - is device available or removed | |
1266 | * @dev: network device | |
1267 | * | |
1268 | * Check if device has not been removed from system. | |
1269 | */ | |
1da177e4 LT |
1270 | static inline int netif_device_present(struct net_device *dev) |
1271 | { | |
1272 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | |
1273 | } | |
1274 | ||
56079431 | 1275 | extern void netif_device_detach(struct net_device *dev); |
1da177e4 | 1276 | |
56079431 | 1277 | extern void netif_device_attach(struct net_device *dev); |
1da177e4 LT |
1278 | |
1279 | /* | |
1280 | * Network interface message level settings | |
1281 | */ | |
1282 | #define HAVE_NETIF_MSG 1 | |
1283 | ||
1284 | enum { | |
1285 | NETIF_MSG_DRV = 0x0001, | |
1286 | NETIF_MSG_PROBE = 0x0002, | |
1287 | NETIF_MSG_LINK = 0x0004, | |
1288 | NETIF_MSG_TIMER = 0x0008, | |
1289 | NETIF_MSG_IFDOWN = 0x0010, | |
1290 | NETIF_MSG_IFUP = 0x0020, | |
1291 | NETIF_MSG_RX_ERR = 0x0040, | |
1292 | NETIF_MSG_TX_ERR = 0x0080, | |
1293 | NETIF_MSG_TX_QUEUED = 0x0100, | |
1294 | NETIF_MSG_INTR = 0x0200, | |
1295 | NETIF_MSG_TX_DONE = 0x0400, | |
1296 | NETIF_MSG_RX_STATUS = 0x0800, | |
1297 | NETIF_MSG_PKTDATA = 0x1000, | |
1298 | NETIF_MSG_HW = 0x2000, | |
1299 | NETIF_MSG_WOL = 0x4000, | |
1300 | }; | |
1301 | ||
1302 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) | |
1303 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) | |
1304 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) | |
1305 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) | |
1306 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) | |
1307 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) | |
1308 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) | |
1309 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) | |
1310 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) | |
1311 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) | |
1312 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) | |
1313 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) | |
1314 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) | |
1315 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) | |
1316 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) | |
1317 | ||
1318 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |
1319 | { | |
1320 | /* use default */ | |
1321 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | |
1322 | return default_msg_enable_bits; | |
1323 | if (debug_value == 0) /* no output */ | |
1324 | return 0; | |
1325 | /* set low N bits */ | |
1326 | return (1 << debug_value) - 1; | |
1327 | } | |
1328 | ||
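A sketch of the usual driver idiom, assuming a module parameter `debug` and a `msg_enable` field in the private struct (the netif_msg_*() helpers above expect one):

```c
static int debug = -1;                  /* -1 means: use driver defaults */

priv->msg_enable = netif_msg_init(debug,
                NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);

if (netif_msg_link(priv))
        printk(KERN_INFO "%s: link is up\n", dev->name);
```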
0a122576 | 1329 | /* Test if receive needs to be scheduled but only if up */ |
bea3348e SH |
1330 | static inline int netif_rx_schedule_prep(struct net_device *dev, |
1331 | struct napi_struct *napi) | |
1da177e4 | 1332 | { |
a0a46196 | 1333 | return napi_schedule_prep(napi); |
1da177e4 LT |
1334 | } |
1335 | ||
1336 | /* Add interface to tail of rx poll list. This assumes that _prep has | |
1337 | * already been called and returned 1. | |
1338 | */ | |
bea3348e SH |
1339 | static inline void __netif_rx_schedule(struct net_device *dev, |
1340 | struct napi_struct *napi) | |
1341 | { | |
bea3348e SH |
1342 | __napi_schedule(napi); |
1343 | } | |
1da177e4 LT |
1344 | |
1345 | /* Try to reschedule poll. Called by irq handler. */ | |
1346 | ||
bea3348e SH |
1347 | static inline void netif_rx_schedule(struct net_device *dev, |
1348 | struct napi_struct *napi) | |
1da177e4 | 1349 | { |
bea3348e SH |
1350 | if (netif_rx_schedule_prep(dev, napi)) |
1351 | __netif_rx_schedule(dev, napi); | |
1da177e4 LT |
1352 | } |
1353 | ||
bea3348e SH |
1354 | /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */ |
1355 | static inline int netif_rx_reschedule(struct net_device *dev, | |
1356 | struct napi_struct *napi) | |
1da177e4 | 1357 | { |
bea3348e SH |
1358 | if (napi_schedule_prep(napi)) { |
1359 | __netif_rx_schedule(dev, napi); | |
1da177e4 LT |
1360 | return 1; |
1361 | } | |
1362 | return 0; | |
1363 | } | |
1364 | ||
b0ba6667 HX |
1365 | /* same as netif_rx_complete, except that local_irq_save(flags) |
1366 | * has already been issued | |
1367 | */ | |
bea3348e SH |
1368 | static inline void __netif_rx_complete(struct net_device *dev, |
1369 | struct napi_struct *napi) | |
b0ba6667 | 1370 | { |
bea3348e | 1371 | __napi_complete(napi); |
b0ba6667 HX |
1372 | } |
1373 | ||
1da177e4 LT |
1374 | /* Remove interface from poll list: it must be in the poll list |
1375 | * on current cpu. This primitive is called by dev->poll(), when | |
1376 | * it completes the work. If the device is not in the poll list at this
1377 | * moment, it is a BUG().
1378 | */ | |
bea3348e SH |
1379 | static inline void netif_rx_complete(struct net_device *dev, |
1380 | struct napi_struct *napi) | |
1da177e4 LT |
1381 | { |
1382 | unsigned long flags; | |
1383 | ||
1384 | local_irq_save(flags); | |
bea3348e | 1385 | __netif_rx_complete(dev, napi); |
1da177e4 LT |
1386 | local_irq_restore(flags); |
1387 | } | |
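/* Illustrative calling sequence (a sketch, not part of this header): the
 * interrupt handler pairs netif_rx_schedule_prep() with
 * __netif_rx_schedule(), and the poll routine calls netif_rx_complete()
 * once it finishes under budget. foo_priv (with hypothetical "napi" and
 * "dev" members) and the foo_*() helpers are made-up names for
 * illustration; requires <linux/interrupt.h>.
 */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *fp = netdev_priv(dev);

	if (netif_rx_schedule_prep(dev, &fp->napi)) {
		foo_mask_irqs(fp);			/* quiesce the device ... */
		__netif_rx_schedule(dev, &fp->napi);	/* ... poll will run */
	}
	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work_done = foo_rx(fp, budget);	/* hand skbs to netif_receive_skb() */

	if (work_done < budget) {
		netif_rx_complete(fp->dev, napi);	/* leave the poll list */
		foo_unmask_irqs(fp);			/* re-arm the interrupt */
	}
	return work_done;
}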
1388 | ||
bea3348e SH |
1389 | /** |
1390 | * __netif_tx_lock - grab network device transmit lock |
1391 | * @dev: network device | |
c4ea43c5 | 1392 | * @cpu: cpu number of lock owner |
bea3348e SH |
1393 | * |
1394 | * Acquire the transmit lock and record @cpu as the lock owner |
1395 | */ | |
22dd7495 | 1396 | static inline void __netif_tx_lock(struct net_device *dev, int cpu) |
932ff279 HX |
1397 | { |
1398 | spin_lock(&dev->_xmit_lock); | |
22dd7495 JHS |
1399 | dev->xmit_lock_owner = cpu; |
1400 | } | |
1401 | ||
1402 | static inline void netif_tx_lock(struct net_device *dev) | |
1403 | { | |
1404 | __netif_tx_lock(dev, smp_processor_id()); | |
932ff279 HX |
1405 | } |
1406 | ||
1407 | static inline void netif_tx_lock_bh(struct net_device *dev) | |
1408 | { | |
1409 | spin_lock_bh(&dev->_xmit_lock); | |
1410 | dev->xmit_lock_owner = smp_processor_id(); | |
1411 | } | |
1412 | ||
1413 | static inline int netif_tx_trylock(struct net_device *dev) | |
1414 | { | |
53c4b2cc HX |
1415 | int ok = spin_trylock(&dev->_xmit_lock); |
1416 | if (likely(ok)) | |
932ff279 | 1417 | dev->xmit_lock_owner = smp_processor_id(); |
53c4b2cc | 1418 | return ok; |
932ff279 HX |
1419 | } |
1420 | ||
1421 | static inline void netif_tx_unlock(struct net_device *dev) | |
1422 | { | |
1423 | dev->xmit_lock_owner = -1; | |
1424 | spin_unlock(&dev->_xmit_lock); | |
1425 | } | |
1426 | ||
1427 | static inline void netif_tx_unlock_bh(struct net_device *dev) | |
1428 | { | |
1429 | dev->xmit_lock_owner = -1; | |
1430 | spin_unlock_bh(&dev->_xmit_lock); | |
1431 | } | |
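/* Sketch (not part of this header): serializing a reconfiguration against
 * the transmit path from process context using the BH-safe lock variants
 * above. foo_reload_tx_ring() is a hypothetical driver helper.
 */
static void foo_reconfigure(struct net_device *dev, int new_mtu)
{
	netif_tx_lock_bh(dev);		/* excludes hard_start_xmit() and BHs */
	dev->mtu = new_mtu;
	foo_reload_tx_ring(dev);	/* safe: no concurrent transmit */
	netif_tx_unlock_bh(dev);
}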
1432 | ||
22dd7495 JHS |
1433 | #define HARD_TX_LOCK(dev, cpu) { \ |
1434 | if ((dev->features & NETIF_F_LLTX) == 0) { \ | |
1435 | __netif_tx_lock(dev, cpu); \ | |
1436 | } \ | |
1437 | } | |
1438 | ||
1439 | #define HARD_TX_UNLOCK(dev) { \ | |
1440 | if ((dev->features & NETIF_F_LLTX) == 0) { \ | |
1441 | netif_tx_unlock(dev); \ | |
1442 | } \ | |
1443 | } | |
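/* Sketch of how a transmit path uses these macros (an assumption modelled
 * on the core's behaviour, not a verbatim copy of net/core/dev.c): drivers
 * that advertise NETIF_F_LLTX do their own locking inside
 * hard_start_xmit(), so the macros skip _xmit_lock for them. The caller is
 * assumed to run with BHs disabled, keeping smp_processor_id() stable.
 */
static int foo_xmit_one(struct net_device *dev, struct sk_buff *skb)
{
	int rc = NETDEV_TX_BUSY;

	HARD_TX_LOCK(dev, smp_processor_id());	/* no-op for LLTX drivers */
	if (!netif_queue_stopped(dev))
		rc = dev->hard_start_xmit(skb, dev);
	HARD_TX_UNLOCK(dev);
	return rc;
}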
1444 | ||
1da177e4 LT |
1445 | static inline void netif_tx_disable(struct net_device *dev) |
1446 | { | |
932ff279 | 1447 | netif_tx_lock_bh(dev); |
1da177e4 | 1448 | netif_stop_queue(dev); |
932ff279 | 1449 | netif_tx_unlock_bh(dev); |
1da177e4 LT |
1450 | } |
1451 | ||
1452 | /* These functions live elsewhere (drivers/net/net_init.c), but are related */ |
1453 | ||
1454 | extern void ether_setup(struct net_device *dev); | |
1455 | ||
1456 | /* Support for loadable net-drivers */ | |
f25f4e44 PWJ |
1457 | extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, |
1458 | void (*setup)(struct net_device *), | |
1459 | unsigned int queue_count); | |
1460 | #define alloc_netdev(sizeof_priv, name, setup) \ | |
1461 | alloc_netdev_mq(sizeof_priv, name, setup, 1) | |
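/* Usage sketch (not part of this header): allocate an Ethernet-style
 * device with private state, fill in the 2.6.x-era method pointers and
 * register it. foo_priv, foo_open and foo_xmit are hypothetical names.
 */
static struct net_device *foo_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
	if (!dev)
		return NULL;

	dev->open = foo_open;
	dev->hard_start_xmit = foo_xmit;

	if (register_netdev(dev)) {		/* resolves the "%d" unit number */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}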
1da177e4 LT |
1462 | extern int register_netdev(struct net_device *dev); |
1463 | extern void unregister_netdev(struct net_device *dev); | |
4417da66 PM |
1464 | /* Functions used for secondary unicast and multicast support */ |
1465 | extern void dev_set_rx_mode(struct net_device *dev); | |
1466 | extern void __dev_set_rx_mode(struct net_device *dev); | |
1467 | extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen); | |
1468 | extern int dev_unicast_add(struct net_device *dev, void *addr, int alen); | |
e83a2ea8 CL |
1469 | extern int dev_unicast_sync(struct net_device *to, struct net_device *from); |
1470 | extern void dev_unicast_unsync(struct net_device *to, struct net_device *from); | |
1da177e4 LT |
1471 | extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all); |
1472 | extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly); | |
a0a400d7 PM |
1473 | extern int dev_mc_sync(struct net_device *to, struct net_device *from); |
1474 | extern void dev_mc_unsync(struct net_device *to, struct net_device *from); | |
61cbc2fc PM |
1475 | extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all); |
1476 | extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly); | |
e83a2ea8 CL |
1477 | extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); |
1478 | extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); | |
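/* Sketch (not part of this header): a stacked device (a VLAN-style
 * interface, say) mirroring its multicast list onto the device below it
 * with dev_mc_sync()/dev_mc_unsync(). lower_dev_of() is a hypothetical
 * accessor for the underlying device.
 */
static void foo_set_rx_mode(struct net_device *dev)
{
	dev_mc_sync(lower_dev_of(dev), dev);	/* push adds/removes down */
}

static void foo_teardown(struct net_device *dev)
{
	dev_mc_unsync(lower_dev_of(dev), dev);	/* undo what was synced */
}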
1da177e4 LT |
1479 | extern void dev_set_promiscuity(struct net_device *dev, int inc); |
1480 | extern void dev_set_allmulti(struct net_device *dev, int inc); | |
1481 | extern void netdev_state_change(struct net_device *dev); | |
c1da4ac7 | 1482 | extern void netdev_bonding_change(struct net_device *dev); |
d8a33ac4 | 1483 | extern void netdev_features_change(struct net_device *dev); |
1da177e4 | 1484 | /* Load a device via the kmod */ |
881d966b | 1485 | extern void dev_load(struct net *net, const char *name); |
1da177e4 LT |
1486 | extern void dev_mcast_init(void); |
1487 | extern int netdev_max_backlog; | |
1488 | extern int weight_p; | |
1489 | extern int netdev_set_master(struct net_device *dev, struct net_device *master); | |
84fa7933 | 1490 | extern int skb_checksum_help(struct sk_buff *skb); |
576a30eb | 1491 | extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features); |
fb286bb2 HX |
1492 | #ifdef CONFIG_BUG |
1493 | extern void netdev_rx_csum_fault(struct net_device *dev); | |
1494 | #else | |
1495 | static inline void netdev_rx_csum_fault(struct net_device *dev) | |
1496 | { | |
1497 | } | |
1498 | #endif | |
1da177e4 LT |
1499 | /* rx skb timestamps */ |
1500 | extern void net_enable_timestamp(void); | |
1501 | extern void net_disable_timestamp(void); | |
1502 | ||
20380731 ACM |
1503 | #ifdef CONFIG_PROC_FS |
1504 | extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); | |
1505 | extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); | |
1506 | extern void dev_seq_stop(struct seq_file *seq, void *v); | |
1507 | #endif | |
1508 | ||
1509 | extern void linkwatch_run_queue(void); | |
1510 | ||
7f353bf2 HX |
1511 | extern int netdev_compute_features(unsigned long all, unsigned long one); |
1512 | ||
bcd76111 | 1513 | static inline int net_gso_ok(int features, int gso_type) |
576a30eb | 1514 | { |
bcd76111 | 1515 | int feature = gso_type << NETIF_F_GSO_SHIFT; |
d6b4991a | 1516 | return (features & feature) == feature; |
576a30eb HX |
1517 | } |
1518 | ||
bcd76111 HX |
1519 | static inline int skb_gso_ok(struct sk_buff *skb, int features) |
1520 | { | |
a430a43d | 1521 | return net_gso_ok(features, skb_shinfo(skb)->gso_type); |
bcd76111 HX |
1522 | } |
1523 | ||
7967168c HX |
1524 | static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) |
1525 | { | |
a430a43d HX |
1526 | return skb_is_gso(skb) && |
1527 | (!skb_gso_ok(skb, dev->features) || | |
84fa7933 | 1528 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); |
7967168c HX |
1529 | } |
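/* Sketch (not part of this header): the shape of a software-GSO fallback
 * implied by netif_needs_gso()/skb_gso_segment() above (simplified, not
 * verbatim net/core/dev.c). Requires <linux/err.h> for IS_ERR().
 */
static int foo_xmit_maybe_gso(struct net_device *dev, struct sk_buff *skb)
{
	if (netif_needs_gso(dev, skb)) {
		struct sk_buff *segs = skb_gso_segment(skb, dev->features);

		if (IS_ERR(segs))
			return -EIO;	/* caller drops the original skb */
		/* ... queue each segment along segs->next ... */
		return 0;
	}
	/* hardware can handle it; transmit skb directly */
	return 0;
}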
1530 | ||
82cc1a7a PWJ |
1531 | static inline void netif_set_gso_max_size(struct net_device *dev, |
1532 | unsigned int size) | |
1533 | { | |
1534 | dev->gso_max_size = size; | |
1535 | } | |
1536 | ||
7ea49ed7 | 1537 | /* On bonding slaves other than the currently active slave, suppress |
f5b2b966 JV |
1538 | * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and |
1539 | * ARP on active-backup slaves with arp_validate enabled. | |
7ea49ed7 DM |
1540 | */ |
1541 | static inline int skb_bond_should_drop(struct sk_buff *skb) | |
1542 | { | |
1543 | struct net_device *dev = skb->dev; | |
1544 | struct net_device *master = dev->master; | |
1545 | ||
1546 | if (master && | |
1547 | (dev->priv_flags & IFF_SLAVE_INACTIVE)) { | |
f5b2b966 JV |
1548 | if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && |
1549 | skb->protocol == __constant_htons(ETH_P_ARP)) | |
1550 | return 0; | |
1551 | ||
7ea49ed7 DM |
1552 | if (master->priv_flags & IFF_MASTER_ALB) { |
1553 | if (skb->pkt_type != PACKET_BROADCAST && | |
1554 | skb->pkt_type != PACKET_MULTICAST) | |
1555 | return 0; | |
1556 | } | |
1557 | if (master->priv_flags & IFF_MASTER_8023AD && | |
1558 | skb->protocol == __constant_htons(ETH_P_SLOW)) | |
1559 | return 0; | |
1560 | ||
1561 | return 1; | |
1562 | } | |
1563 | return 0; | |
1564 | } | |
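/* Sketch (not part of this header): how a receive path can consult
 * skb_bond_should_drop() before delivery, simplified from the bonding
 * handling in netif_receive_skb().
 */
static int foo_deliver(struct sk_buff *skb)
{
	if (skb_bond_should_drop(skb)) {
		kfree_skb(skb);			/* inactive-slave duplicate */
		return NET_RX_DROP;
	}
	if (skb->dev->master)
		skb->dev = skb->dev->master;	/* credit the bond device */
	return netif_receive_skb(skb);
}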
1565 | ||
1da177e4 LT |
1566 | #endif /* __KERNEL__ */ |
1567 | ||
1568 | #endif /* _LINUX_NETDEVICE_H */ |