net/packet/af_packet.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate skb when the header
     will not fit in the reserved space (tunnel), others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
                 header. PPP does this, which is wrong, because it introduces
                 asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary:
   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */

/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);


#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	atomic_t	blk_fill_in_prog;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

#define PGV_FROM_VMALLOC 1
struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	struct tpacket_kbdq_core	prb_bdqc;
	atomic_t		pending;
};

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_fanout;
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	struct tpacket_stats	stats;
	union  tpacket_stats_u	stats_u;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached*/
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

#define PACKET_FANOUT_MAX	256

struct packet_fanout {
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			defrag;
	atomic_t		rr_cur;
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

eea49cc9 337static struct packet_sock *pkt_sk(struct sock *sk)
ce06b03e
DM
338{
339 return (struct packet_sock *)sk;
340}
341
dc99f600
DM
342static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
343static void __fanout_link(struct sock *sk, struct packet_sock *po);
344
ce06b03e
DM
345/* register_prot_hook must be invoked with the po->bind_lock held,
346 * or from a context in which asynchronous accesses to the packet
347 * socket is not possible (packet_create()).
348 */
349static void register_prot_hook(struct sock *sk)
350{
351 struct packet_sock *po = pkt_sk(sk);
352 if (!po->running) {
dc99f600
DM
353 if (po->fanout)
354 __fanout_link(sk, po);
355 else
356 dev_add_pack(&po->prot_hook);
ce06b03e
DM
357 sock_hold(sk);
358 po->running = 1;
359 }
360}
361
362/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
363 * held. If the sync parameter is true, we will temporarily drop
364 * the po->bind_lock and do a synchronize_net to make sure no
365 * asynchronous packet processing paths still refer to the elements
366 * of po->prot_hook. If the sync parameter is false, it is the
367 * callers responsibility to take care of this.
368 */
369static void __unregister_prot_hook(struct sock *sk, bool sync)
370{
371 struct packet_sock *po = pkt_sk(sk);
372
373 po->running = 0;
dc99f600
DM
374 if (po->fanout)
375 __fanout_unlink(sk, po);
376 else
377 __dev_remove_pack(&po->prot_hook);
ce06b03e
DM
378 __sock_put(sk);
379
380 if (sync) {
381 spin_unlock(&po->bind_lock);
382 synchronize_net();
383 spin_lock(&po->bind_lock);
384 }
385}
386
387static void unregister_prot_hook(struct sock *sk, bool sync)
388{
389 struct packet_sock *po = pkt_sk(sk);
390
391 if (po->running)
392 __unregister_prot_hook(sk, sync);
393}
394
f6dafa95 395static inline __pure struct page *pgv_to_page(void *addr)
0af55bb5
CG
396{
397 if (is_vmalloc_addr(addr))
398 return vmalloc_to_page(addr);
399 return virt_to_page(addr);
400}
401
69e3c75f 402static void __packet_set_status(struct packet_sock *po, void *frame, int status)
1da177e4 403{
bbd6ef87
PM
404 union {
405 struct tpacket_hdr *h1;
406 struct tpacket2_hdr *h2;
407 void *raw;
408 } h;
1da177e4 409
69e3c75f 410 h.raw = frame;
bbd6ef87
PM
411 switch (po->tp_version) {
412 case TPACKET_V1:
69e3c75f 413 h.h1->tp_status = status;
0af55bb5 414 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
bbd6ef87
PM
415 break;
416 case TPACKET_V2:
69e3c75f 417 h.h2->tp_status = status;
0af55bb5 418 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
bbd6ef87 419 break;
f6fb8f10 420 case TPACKET_V3:
69e3c75f 421 default:
f6fb8f10 422 WARN(1, "TPACKET version not supported.\n");
69e3c75f 423 BUG();
bbd6ef87 424 }
69e3c75f
JB
425
426 smp_wmb();
bbd6ef87
PM
427}
428
69e3c75f 429static int __packet_get_status(struct packet_sock *po, void *frame)
bbd6ef87
PM
430{
431 union {
432 struct tpacket_hdr *h1;
433 struct tpacket2_hdr *h2;
434 void *raw;
435 } h;
436
69e3c75f
JB
437 smp_rmb();
438
bbd6ef87
PM
439 h.raw = frame;
440 switch (po->tp_version) {
441 case TPACKET_V1:
0af55bb5 442 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
69e3c75f 443 return h.h1->tp_status;
bbd6ef87 444 case TPACKET_V2:
0af55bb5 445 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
69e3c75f 446 return h.h2->tp_status;
f6fb8f10 447 case TPACKET_V3:
69e3c75f 448 default:
f6fb8f10 449 WARN(1, "TPACKET version not supported.\n");
69e3c75f
JB
450 BUG();
451 return 0;
bbd6ef87 452 }
1da177e4 453}
69e3c75f
JB
454
455static void *packet_lookup_frame(struct packet_sock *po,
456 struct packet_ring_buffer *rb,
457 unsigned int position,
458 int status)
459{
460 unsigned int pg_vec_pos, frame_offset;
461 union {
462 struct tpacket_hdr *h1;
463 struct tpacket2_hdr *h2;
464 void *raw;
465 } h;
466
467 pg_vec_pos = position / rb->frames_per_block;
468 frame_offset = position % rb->frames_per_block;
469
0e3125c7
NH
470 h.raw = rb->pg_vec[pg_vec_pos].buffer +
471 (frame_offset * rb->frame_size);
69e3c75f
JB
472
473 if (status != __packet_get_status(po, h.raw))
474 return NULL;
475
476 return h.raw;
477}
478
eea49cc9 479static void *packet_current_frame(struct packet_sock *po,
69e3c75f
JB
480 struct packet_ring_buffer *rb,
481 int status)
482{
483 return packet_lookup_frame(po, rb, rb->head, status);
484}
485
bc59ba39 486static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
f6fb8f10 487{
488 del_timer_sync(&pkc->retire_blk_timer);
489}
490
491static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
492 int tx_ring,
493 struct sk_buff_head *rb_queue)
494{
bc59ba39 495 struct tpacket_kbdq_core *pkc;
f6fb8f10 496
497 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
498
499 spin_lock(&rb_queue->lock);
500 pkc->delete_blk_timer = 1;
501 spin_unlock(&rb_queue->lock);
502
503 prb_del_retire_blk_timer(pkc);
504}
505
506static void prb_init_blk_timer(struct packet_sock *po,
bc59ba39 507 struct tpacket_kbdq_core *pkc,
f6fb8f10 508 void (*func) (unsigned long))
509{
510 init_timer(&pkc->retire_blk_timer);
511 pkc->retire_blk_timer.data = (long)po;
512 pkc->retire_blk_timer.function = func;
513 pkc->retire_blk_timer.expires = jiffies;
514}
515
516static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
517{
bc59ba39 518 struct tpacket_kbdq_core *pkc;
f6fb8f10 519
520 if (tx_ring)
521 BUG();
522
523 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
524 prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
525}
526
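/* Worked example of the timeout calculation below (illustrative only):
 * with a 1 MB block on a 1 Gbps link, mbits = (1048576 * 8) / (1024 * 1024)
 * = 8, div = 1000 / 1000 = 1, so tmo = 8 * 1 msec and the function returns
 * tmo + 1 = 9 ms, roughly DEFAULT_PRB_RETIRE_TOV (8 ms).
 */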
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}
569
bc59ba39 570static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
f6fb8f10 571 union tpacket_req_u *req_u)
572{
573 p1->feature_req_word = req_u->req3.tp_feature_req_word;
574}
575
576static void init_prb_bdqc(struct packet_sock *po,
577 struct packet_ring_buffer *rb,
578 struct pgv *pg_vec,
579 union tpacket_req_u *req_u, int tx_ring)
580{
bc59ba39 581 struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
582 struct tpacket_block_desc *pbd;
f6fb8f10 583
584 memset(p1, 0x0, sizeof(*p1));
585
586 p1->knxt_seq_num = 1;
587 p1->pkbdq = pg_vec;
bc59ba39 588 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
e3192690 589 p1->pkblk_start = pg_vec[0].buffer;
f6fb8f10 590 p1->kblk_size = req_u->req3.tp_block_size;
591 p1->knum_blocks = req_u->req3.tp_block_nr;
592 p1->hdrlen = po->tp_hdrlen;
593 p1->version = po->tp_version;
594 p1->last_kactive_blk_num = 0;
595 po->stats_u.stats3.tp_freeze_q_cnt = 0;
596 if (req_u->req3.tp_retire_blk_tov)
597 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
598 else
599 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
600 req_u->req3.tp_block_size);
601 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
602 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
603
604 prb_init_ft_ops(p1, req_u);
605 prb_setup_retire_blk_timer(po, tx_ring);
606 prb_open_block(p1, pbd);
607}
608
609/* Do NOT update the last_blk_num first.
610 * Assumes sk_buff_head lock is held.
611 */
bc59ba39 612static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
f6fb8f10 613{
614 mod_timer(&pkc->retire_blk_timer,
615 jiffies + pkc->tov_in_jiffies);
616 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
617}
618
619/*
620 * Timer logic:
621 * 1) We refresh the timer only when we open a block.
622 * By doing this we don't waste cycles refreshing the timer
623 * on packet-by-packet basis.
624 *
625 * With a 1MB block-size, on a 1Gbps line, it will take
626 * i) ~8 ms to fill a block + ii) memcpy etc.
627 * In this cut we are not accounting for the memcpy time.
628 *
629 * So, if the user sets the 'tmo' to 10ms then the timer
630 * will never fire while the block is still getting filled
631 * (which is what we want). However, the user could choose
632 * to close a block early and that's fine.
633 *
634 * But when the timer does fire, we check whether or not to refresh it.
635 * Since the tmo granularity is in msecs, it is not too expensive
636 * to refresh the timer, lets say every '8' msecs.
637 * Either the user can set the 'tmo' or we can derive it based on
638 * a) line-speed and b) block-size.
639 * prb_calc_retire_blk_tmo() calculates the tmo.
640 *
641 */
642static void prb_retire_rx_blk_timer_expired(unsigned long data)
643{
644 struct packet_sock *po = (struct packet_sock *)data;
bc59ba39 645 struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
f6fb8f10 646 unsigned int frozen;
bc59ba39 647 struct tpacket_block_desc *pbd;
f6fb8f10 648
649 spin_lock(&po->sk.sk_receive_queue.lock);
650
651 frozen = prb_queue_frozen(pkc);
652 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
653
654 if (unlikely(pkc->delete_blk_timer))
655 goto out;
656
657 /* We only need to plug the race when the block is partially filled.
658 * tpacket_rcv:
659 * lock(); increment BLOCK_NUM_PKTS; unlock()
660 * copy_bits() is in progress ...
661 * timer fires on other cpu:
662 * we can't retire the current block because copy_bits
663 * is in progress.
664 *
665 */
666 if (BLOCK_NUM_PKTS(pbd)) {
667 while (atomic_read(&pkc->blk_fill_in_prog)) {
668 /* Waiting for skb_copy_bits to finish... */
669 cpu_relax();
670 }
671 }
672
673 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
674 if (!frozen) {
675 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
676 if (!prb_dispatch_next_block(pkc, po))
677 goto refresh_timer;
678 else
679 goto out;
680 } else {
681 /* Case 1. Queue was frozen because user-space was
682 * lagging behind.
683 */
684 if (prb_curr_blk_in_use(pkc, pbd)) {
685 /*
686 * Ok, user-space is still behind.
687 * So just refresh the timer.
688 */
689 goto refresh_timer;
690 } else {
691 /* Case 2. queue was frozen,user-space caught up,
692 * now the link went idle && the timer fired.
693 * We don't have a block to close.So we open this
694 * block and restart the timer.
695 * opening a block thaws the queue,restarts timer
696 * Thawing/timer-refresh is a side effect.
697 */
698 prb_open_block(pkc, pbd);
699 goto out;
700 }
701 }
702 }
703
704refresh_timer:
705 _prb_refresh_rx_retire_blk_timer(pkc);
706
707out:
708 spin_unlock(&po->sk.sk_receive_queue.lock);
709}
710
eea49cc9 711static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
bc59ba39 712 struct tpacket_block_desc *pbd1, __u32 status)
f6fb8f10 713{
714 /* Flush everything minus the block header */
715
716#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
717 u8 *start, *end;
718
719 start = (u8 *)pbd1;
720
721 /* Skip the block header(we know header WILL fit in 4K) */
722 start += PAGE_SIZE;
723
724 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
725 for (; start < end; start += PAGE_SIZE)
726 flush_dcache_page(pgv_to_page(start));
727
728 smp_wmb();
729#endif
730
731 /* Now update the block status. */
732
733 BLOCK_STATUS(pbd1) = status;
734
735 /* Flush the block header */
736
737#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
738 start = (u8 *)pbd1;
739 flush_dcache_page(pgv_to_page(start));
740
741 smp_wmb();
742#endif
743}
744
745/*
746 * Side effect:
747 *
748 * 1) flush the block
749 * 2) Increment active_blk_num
750 *
751 * Note:We DONT refresh the timer on purpose.
752 * Because almost always the next block will be opened.
753 */
bc59ba39 754static void prb_close_block(struct tpacket_kbdq_core *pkc1,
755 struct tpacket_block_desc *pbd1,
f6fb8f10 756 struct packet_sock *po, unsigned int stat)
757{
758 __u32 status = TP_STATUS_USER | stat;
759
760 struct tpacket3_hdr *last_pkt;
bc59ba39 761 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
f6fb8f10 762
763 if (po->stats.tp_drops)
764 status |= TP_STATUS_LOSING;
765
766 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
767 last_pkt->tp_next_offset = 0;
768
769 /* Get the ts of the last pkt */
770 if (BLOCK_NUM_PKTS(pbd1)) {
771 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
772 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
773 } else {
774 /* Ok, we tmo'd - so get the current time */
775 struct timespec ts;
776 getnstimeofday(&ts);
777 h1->ts_last_pkt.ts_sec = ts.tv_sec;
778 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
779 }
780
781 smp_wmb();
782
783 /* Flush the block */
784 prb_flush_block(pkc1, pbd1, status);
785
786 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
787}
788
eea49cc9 789static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
f6fb8f10 790{
791 pkc->reset_pending_on_curr_blk = 0;
792}
793
794/*
795 * Side effect of opening a block:
796 *
797 * 1) prb_queue is thawed.
798 * 2) retire_blk_timer is refreshed.
799 *
800 */
bc59ba39 801static void prb_open_block(struct tpacket_kbdq_core *pkc1,
802 struct tpacket_block_desc *pbd1)
f6fb8f10 803{
804 struct timespec ts;
bc59ba39 805 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
f6fb8f10 806
807 smp_rmb();
808
809 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
810
811 /* We could have just memset this but we will lose the
812 * flexibility of making the priv area sticky
813 */
814 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
815 BLOCK_NUM_PKTS(pbd1) = 0;
816 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
817 getnstimeofday(&ts);
818 h1->ts_first_pkt.ts_sec = ts.tv_sec;
819 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
820 pkc1->pkblk_start = (char *)pbd1;
e3192690 821 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
f6fb8f10 822 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
823 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
824 pbd1->version = pkc1->version;
825 pkc1->prev = pkc1->nxt_offset;
826 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
827 prb_thaw_queue(pkc1);
828 _prb_refresh_rx_retire_blk_timer(pkc1);
829
830 smp_wmb();
831
832 return;
833 }
834
835 WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
836 pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
837 dump_stack();
838 BUG();
839}
840
841/*
842 * Queue freeze logic:
843 * 1) Assume tp_block_nr = 8 blocks.
844 * 2) At time 't0', user opens Rx ring.
845 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
846 * 4) user-space is either sleeping or processing block '0'.
847 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
848 * it will close block-7,loop around and try to fill block '0'.
849 * call-flow:
850 * __packet_lookup_frame_in_block
851 * prb_retire_current_block()
852 * prb_dispatch_next_block()
853 * |->(BLOCK_STATUS == USER) evaluates to true
854 * 5.1) Since block-0 is currently in-use, we just freeze the queue.
855 * 6) Now there are two cases:
856 * 6.1) Link goes idle right after the queue is frozen.
857 * But remember, the last open_block() refreshed the timer.
858 * When this timer expires,it will refresh itself so that we can
859 * re-open block-0 in near future.
860 * 6.2) Link is busy and keeps on receiving packets. This is a simple
861 * case and __packet_lookup_frame_in_block will check if block-0
862 * is free and can now be re-used.
863 */
eea49cc9 864static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
f6fb8f10 865 struct packet_sock *po)
866{
867 pkc->reset_pending_on_curr_blk = 1;
868 po->stats_u.stats3.tp_freeze_q_cnt++;
869}
870
871#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
872
873/*
874 * If the next block is free then we will dispatch it
875 * and return a good offset.
876 * Else, we will freeze the queue.
877 * So, caller must check the return value.
878 */
bc59ba39 879static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
f6fb8f10 880 struct packet_sock *po)
881{
bc59ba39 882 struct tpacket_block_desc *pbd;
f6fb8f10 883
884 smp_rmb();
885
886 /* 1. Get current block num */
887 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
888
889 /* 2. If this block is currently in_use then freeze the queue */
890 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
891 prb_freeze_queue(pkc, po);
892 return NULL;
893 }
894
895 /*
896 * 3.
897 * open this block and return the offset where the first packet
898 * needs to get stored.
899 */
900 prb_open_block(pkc, pbd);
901 return (void *)pkc->nxt_offset;
902}
903
bc59ba39 904static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
f6fb8f10 905 struct packet_sock *po, unsigned int status)
906{
bc59ba39 907 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
f6fb8f10 908
909 /* retire/close the current block */
910 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
911 /*
912 * Plug the case where copy_bits() is in progress on
913 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
914 * have space to copy the pkt in the current block and
915 * called prb_retire_current_block()
916 *
917 * We don't need to worry about the TMO case because
918 * the timer-handler already handled this case.
919 */
920 if (!(status & TP_STATUS_BLK_TMO)) {
921 while (atomic_read(&pkc->blk_fill_in_prog)) {
922 /* Waiting for skb_copy_bits to finish... */
923 cpu_relax();
924 }
925 }
926 prb_close_block(pkc, pbd, po, status);
927 return;
928 }
929
930 WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
931 dump_stack();
932 BUG();
933}
934
eea49cc9 935static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
bc59ba39 936 struct tpacket_block_desc *pbd)
f6fb8f10 937{
938 return TP_STATUS_USER & BLOCK_STATUS(pbd);
939}
940
eea49cc9 941static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
f6fb8f10 942{
943 return pkc->reset_pending_on_curr_blk;
944}
945
eea49cc9 946static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
f6fb8f10 947{
bc59ba39 948 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
f6fb8f10 949 atomic_dec(&pkc->blk_fill_in_prog);
950}
951
eea49cc9 952static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
f6fb8f10 953 struct tpacket3_hdr *ppd)
954{
955 ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
956}
957
eea49cc9 958static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
f6fb8f10 959 struct tpacket3_hdr *ppd)
960{
961 ppd->hv1.tp_rxhash = 0;
962}
963
eea49cc9 964static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
f6fb8f10 965 struct tpacket3_hdr *ppd)
966{
967 if (vlan_tx_tag_present(pkc->skb)) {
968 ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
969 ppd->tp_status = TP_STATUS_VLAN_VALID;
970 } else {
971 ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
972 }
973}
974
bc59ba39 975static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
f6fb8f10 976 struct tpacket3_hdr *ppd)
977{
978 prb_fill_vlan_info(pkc, ppd);
979
980 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
981 prb_fill_rxhash(pkc, ppd);
982 else
983 prb_clear_rxhash(pkc, ppd);
984}
985
eea49cc9 986static void prb_fill_curr_block(char *curr,
bc59ba39 987 struct tpacket_kbdq_core *pkc,
988 struct tpacket_block_desc *pbd,
f6fb8f10 989 unsigned int len)
990{
991 struct tpacket3_hdr *ppd;
992
993 ppd = (struct tpacket3_hdr *)curr;
994 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
995 pkc->prev = curr;
996 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
997 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
998 BLOCK_NUM_PKTS(pbd) += 1;
999 atomic_inc(&pkc->blk_fill_in_prog);
1000 prb_run_all_ft_ops(pkc, ppd);
1001}
1002
1003/* Assumes caller has the sk->rx_queue.lock */
1004static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1005 struct sk_buff *skb,
1006 int status,
1007 unsigned int len
1008 )
1009{
bc59ba39 1010 struct tpacket_kbdq_core *pkc;
1011 struct tpacket_block_desc *pbd;
f6fb8f10 1012 char *curr, *end;
1013
e3192690 1014 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
f6fb8f10 1015 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1016
1017 /* Queue is frozen when user space is lagging behind */
1018 if (prb_queue_frozen(pkc)) {
1019 /*
1020 * Check if that last block which caused the queue to freeze,
1021 * is still in_use by user-space.
1022 */
1023 if (prb_curr_blk_in_use(pkc, pbd)) {
1024 /* Can't record this packet */
1025 return NULL;
1026 } else {
1027 /*
1028 * Ok, the block was released by user-space.
1029 * Now let's open that block.
1030 * opening a block also thaws the queue.
1031 * Thawing is a side effect.
1032 */
1033 prb_open_block(pkc, pbd);
1034 }
1035 }
1036
1037 smp_mb();
1038 curr = pkc->nxt_offset;
1039 pkc->skb = skb;
e3192690 1040 end = (char *)pbd + pkc->kblk_size;
f6fb8f10 1041
1042 /* first try the current block */
1043 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1044 prb_fill_curr_block(curr, pkc, pbd, len);
1045 return (void *)curr;
1046 }
1047
1048 /* Ok, close the current block */
1049 prb_retire_current_block(pkc, po, 0);
1050
1051 /* Now, try to dispatch the next block */
1052 curr = (char *)prb_dispatch_next_block(pkc, po);
1053 if (curr) {
1054 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1055 prb_fill_curr_block(curr, pkc, pbd, len);
1056 return (void *)curr;
1057 }
1058
1059 /*
1060 * No free blocks are available.user_space hasn't caught up yet.
1061 * Queue was just frozen and now this packet will get dropped.
1062 */
1063 return NULL;
1064}
1065
eea49cc9 1066static void *packet_current_rx_frame(struct packet_sock *po,
f6fb8f10 1067 struct sk_buff *skb,
1068 int status, unsigned int len)
1069{
1070 char *curr = NULL;
1071 switch (po->tp_version) {
1072 case TPACKET_V1:
1073 case TPACKET_V2:
1074 curr = packet_lookup_frame(po, &po->rx_ring,
1075 po->rx_ring.head, status);
1076 return curr;
1077 case TPACKET_V3:
1078 return __packet_lookup_frame_in_block(po, skb, status, len);
1079 default:
1080 WARN(1, "TPACKET version not supported\n");
1081 BUG();
99aa3473 1082 return NULL;
f6fb8f10 1083 }
1084}
1085
eea49cc9 1086static void *prb_lookup_block(struct packet_sock *po,
f6fb8f10 1087 struct packet_ring_buffer *rb,
1088 unsigned int previous,
1089 int status)
1090{
bc59ba39 1091 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1092 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
f6fb8f10 1093
1094 if (status != BLOCK_STATUS(pbd))
1095 return NULL;
1096 return pbd;
1097}
1098
eea49cc9 1099static int prb_previous_blk_num(struct packet_ring_buffer *rb)
f6fb8f10 1100{
1101 unsigned int prev;
1102 if (rb->prb_bdqc.kactive_blk_num)
1103 prev = rb->prb_bdqc.kactive_blk_num-1;
1104 else
1105 prev = rb->prb_bdqc.knum_blocks-1;
1106 return prev;
1107}
1108
1109/* Assumes caller has held the rx_queue.lock */
eea49cc9 1110static void *__prb_previous_block(struct packet_sock *po,
f6fb8f10 1111 struct packet_ring_buffer *rb,
1112 int status)
1113{
1114 unsigned int previous = prb_previous_blk_num(rb);
1115 return prb_lookup_block(po, rb, previous, status);
1116}
1117
eea49cc9 1118static void *packet_previous_rx_frame(struct packet_sock *po,
f6fb8f10 1119 struct packet_ring_buffer *rb,
1120 int status)
1121{
1122 if (po->tp_version <= TPACKET_V2)
1123 return packet_previous_frame(po, rb, status);
1124
1125 return __prb_previous_block(po, rb, status);
1126}
1127
eea49cc9 1128static void packet_increment_rx_head(struct packet_sock *po,
f6fb8f10 1129 struct packet_ring_buffer *rb)
1130{
1131 switch (po->tp_version) {
1132 case TPACKET_V1:
1133 case TPACKET_V2:
1134 return packet_increment_head(rb);
1135 case TPACKET_V3:
1136 default:
1137 WARN(1, "TPACKET version not supported.\n");
1138 BUG();
1139 return;
1140 }
1141}
1142
eea49cc9 1143static void *packet_previous_frame(struct packet_sock *po,
69e3c75f
JB
1144 struct packet_ring_buffer *rb,
1145 int status)
1146{
1147 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1148 return packet_lookup_frame(po, rb, previous, status);
1149}
1150
eea49cc9 1151static void packet_increment_head(struct packet_ring_buffer *buff)
69e3c75f
JB
1152{
1153 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1154}
1155
1da177e4
LT
1156static void packet_sock_destruct(struct sock *sk)
1157{
ed85b565
RC
1158 skb_queue_purge(&sk->sk_error_queue);
1159
547b792c
IJ
1160 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1161 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1da177e4
LT
1162
1163 if (!sock_flag(sk, SOCK_DEAD)) {
40d4e3df 1164 pr_err("Attempt to release alive packet socket: %p\n", sk);
1da177e4
LT
1165 return;
1166 }
1167
17ab56a2 1168 sk_refcnt_debug_dec(sk);
1da177e4
LT
1169}
1170
dc99f600
DM
1171static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
1172{
1173 int x = atomic_read(&f->rr_cur) + 1;
1174
1175 if (x >= num)
1176 x = 0;
1177
1178 return x;
1179}
1180
1181static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1182{
1183 u32 idx, hash = skb->rxhash;
1184
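	/* Widen to u64 and multiply-shift: this scales the 32-bit flow hash
	 * onto [0, num) without a modulo, spreading flows roughly evenly
	 * across the fanout sockets.
	 */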
1185 idx = ((u64)hash * num) >> 32;
1186
1187 return f->arr[idx];
1188}
1189
1190static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1191{
1192 int cur, old;
1193
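	/* Lock-free round-robin: try to advance rr_cur to the next slot; if
	 * another CPU raced us, atomic_cmpxchg() returns the value it saw
	 * and we retry from there. The slot used is the value observed
	 * before the successful advance.
	 */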
1194 cur = atomic_read(&f->rr_cur);
1195 while ((old = atomic_cmpxchg(&f->rr_cur, cur,
1196 fanout_rr_next(f, num))) != cur)
1197 cur = old;
1198 return f->arr[cur];
1199}
1200
95ec3eb4
DM
1201static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
1202{
1203 unsigned int cpu = smp_processor_id();
1204
1205 return f->arr[cpu % num];
1206}
1207
95ec3eb4
DM
1208static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1209 struct packet_type *pt, struct net_device *orig_dev)
dc99f600
DM
1210{
1211 struct packet_fanout *f = pt->af_packet_priv;
1212 unsigned int num = f->num_members;
1213 struct packet_sock *po;
1214 struct sock *sk;
1215
1216 if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1217 !num) {
1218 kfree_skb(skb);
1219 return 0;
1220 }
1221
95ec3eb4
DM
1222 switch (f->type) {
1223 case PACKET_FANOUT_HASH:
1224 default:
1225 if (f->defrag) {
bc416d97 1226 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
95ec3eb4
DM
1227 if (!skb)
1228 return 0;
1229 }
1230 skb_get_rxhash(skb);
1231 sk = fanout_demux_hash(f, skb, num);
1232 break;
1233 case PACKET_FANOUT_LB:
1234 sk = fanout_demux_lb(f, skb, num);
1235 break;
1236 case PACKET_FANOUT_CPU:
1237 sk = fanout_demux_cpu(f, skb, num);
1238 break;
dc99f600
DM
1239 }
1240
dc99f600
DM
1241 po = pkt_sk(sk);
1242
1243 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1244}
1245
1246static DEFINE_MUTEX(fanout_mutex);
1247static LIST_HEAD(fanout_list);
1248
1249static void __fanout_link(struct sock *sk, struct packet_sock *po)
1250{
1251 struct packet_fanout *f = po->fanout;
1252
1253 spin_lock(&f->lock);
1254 f->arr[f->num_members] = sk;
1255 smp_wmb();
1256 f->num_members++;
1257 spin_unlock(&f->lock);
1258}
1259
1260static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1261{
1262 struct packet_fanout *f = po->fanout;
1263 int i;
1264
1265 spin_lock(&f->lock);
1266 for (i = 0; i < f->num_members; i++) {
1267 if (f->arr[i] == sk)
1268 break;
1269 }
1270 BUG_ON(i >= f->num_members);
1271 f->arr[i] = f->arr[f->num_members - 1];
1272 f->num_members--;
1273 spin_unlock(&f->lock);
1274}
1275
c0de08d0
EL
1276bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
1277{
1278 if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
1279 return true;
1280
1281 return false;
1282}
1283
7736d33f 1284static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
dc99f600
DM
1285{
1286 struct packet_sock *po = pkt_sk(sk);
1287 struct packet_fanout *f, *match;
7736d33f
DM
1288 u8 type = type_flags & 0xff;
1289 u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
dc99f600
DM
1290 int err;
1291
1292 switch (type) {
1293 case PACKET_FANOUT_HASH:
1294 case PACKET_FANOUT_LB:
95ec3eb4 1295 case PACKET_FANOUT_CPU:
dc99f600
DM
1296 break;
1297 default:
1298 return -EINVAL;
1299 }
1300
1301 if (!po->running)
1302 return -EINVAL;
1303
1304 if (po->fanout)
1305 return -EALREADY;
1306
1307 mutex_lock(&fanout_mutex);
1308 match = NULL;
1309 list_for_each_entry(f, &fanout_list, list) {
1310 if (f->id == id &&
1311 read_pnet(&f->net) == sock_net(sk)) {
1312 match = f;
1313 break;
1314 }
1315 }
afe62c68 1316 err = -EINVAL;
7736d33f 1317 if (match && match->defrag != defrag)
afe62c68 1318 goto out;
dc99f600 1319 if (!match) {
afe62c68 1320 err = -ENOMEM;
dc99f600 1321 match = kzalloc(sizeof(*match), GFP_KERNEL);
afe62c68
ED
1322 if (!match)
1323 goto out;
1324 write_pnet(&match->net, sock_net(sk));
1325 match->id = id;
1326 match->type = type;
1327 match->defrag = defrag;
1328 atomic_set(&match->rr_cur, 0);
1329 INIT_LIST_HEAD(&match->list);
1330 spin_lock_init(&match->lock);
1331 atomic_set(&match->sk_ref, 0);
1332 match->prot_hook.type = po->prot_hook.type;
1333 match->prot_hook.dev = po->prot_hook.dev;
1334 match->prot_hook.func = packet_rcv_fanout;
1335 match->prot_hook.af_packet_priv = match;
c0de08d0 1336 match->prot_hook.id_match = match_fanout_group;
afe62c68
ED
1337 dev_add_pack(&match->prot_hook);
1338 list_add(&match->list, &fanout_list);
dc99f600 1339 }
afe62c68
ED
1340 err = -EINVAL;
1341 if (match->type == type &&
1342 match->prot_hook.type == po->prot_hook.type &&
1343 match->prot_hook.dev == po->prot_hook.dev) {
1344 err = -ENOSPC;
1345 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1346 __dev_remove_pack(&po->prot_hook);
1347 po->fanout = match;
1348 atomic_inc(&match->sk_ref);
1349 __fanout_link(sk, po);
1350 err = 0;
dc99f600
DM
1351 }
1352 }
afe62c68 1353out:
dc99f600
DM
1354 mutex_unlock(&fanout_mutex);
1355 return err;
1356}
1357
1358static void fanout_release(struct sock *sk)
1359{
1360 struct packet_sock *po = pkt_sk(sk);
1361 struct packet_fanout *f;
1362
1363 f = po->fanout;
1364 if (!f)
1365 return;
1366
1367 po->fanout = NULL;
1368
1369 mutex_lock(&fanout_mutex);
1370 if (atomic_dec_and_test(&f->sk_ref)) {
1371 list_del(&f->list);
1372 dev_remove_pack(&f->prot_hook);
1373 kfree(f);
1374 }
1375 mutex_unlock(&fanout_mutex);
1376}
1da177e4 1377
90ddc4f0 1378static const struct proto_ops packet_ops;
1da177e4 1379
90ddc4f0 1380static const struct proto_ops packet_ops_spkt;
1da177e4 1381
40d4e3df
ED
1382static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1383 struct packet_type *pt, struct net_device *orig_dev)
1da177e4
LT
1384{
1385 struct sock *sk;
1386 struct sockaddr_pkt *spkt;
1387
1388 /*
1389 * When we registered the protocol we saved the socket in the data
1390 * field for just this event.
1391 */
1392
1393 sk = pt->af_packet_priv;
1ce4f28b 1394
1da177e4
LT
1395 /*
1396 * Yank back the headers [hope the device set this
1397 * right or kerboom...]
1398 *
1399 * Incoming packets have ll header pulled,
1400 * push it back.
1401 *
98e399f8 1402 * For outgoing ones skb->data == skb_mac_header(skb)
1da177e4
LT
1403 * so that this procedure is noop.
1404 */
1405
1406 if (skb->pkt_type == PACKET_LOOPBACK)
1407 goto out;
1408
09ad9bc7 1409 if (!net_eq(dev_net(dev), sock_net(sk)))
d12d01d6
DL
1410 goto out;
1411
40d4e3df
ED
1412 skb = skb_share_check(skb, GFP_ATOMIC);
1413 if (skb == NULL)
1da177e4
LT
1414 goto oom;
1415
1416 /* drop any routing info */
adf30907 1417 skb_dst_drop(skb);
1da177e4 1418
84531c24
PO
1419 /* drop conntrack reference */
1420 nf_reset(skb);
1421
ffbc6111 1422 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1da177e4 1423
98e399f8 1424 skb_push(skb, skb->data - skb_mac_header(skb));
1da177e4
LT
1425
1426 /*
1427 * The SOCK_PACKET socket receives _all_ frames.
1428 */
1429
1430 spkt->spkt_family = dev->type;
1431 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1432 spkt->spkt_protocol = skb->protocol;
1433
1434 /*
1435 * Charge the memory to the socket. This is done specifically
1436 * to prevent sockets using all the memory up.
1437 */
1438
40d4e3df 1439 if (sock_queue_rcv_skb(sk, skb) == 0)
1da177e4
LT
1440 return 0;
1441
1442out:
1443 kfree_skb(skb);
1444oom:
1445 return 0;
1446}
1447
1448
1449/*
1450 * Output a raw packet to a device layer. This bypasses all the other
1451 * protocol layers and you must therefore supply it with a complete frame
1452 */
1ce4f28b 1453
1da177e4
LT
1454static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
1455 struct msghdr *msg, size_t len)
1456{
1457 struct sock *sk = sock->sk;
40d4e3df 1458 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
1a35ca80 1459 struct sk_buff *skb = NULL;
1da177e4 1460 struct net_device *dev;
40d4e3df 1461 __be16 proto = 0;
1da177e4 1462 int err;
3bdc0eba 1463 int extra_len = 0;
1ce4f28b 1464
1da177e4 1465 /*
1ce4f28b 1466 * Get and verify the address.
1da177e4
LT
1467 */
1468
40d4e3df 1469 if (saddr) {
1da177e4 1470 if (msg->msg_namelen < sizeof(struct sockaddr))
40d4e3df
ED
1471 return -EINVAL;
1472 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1473 proto = saddr->spkt_protocol;
1474 } else
1475 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
1da177e4
LT
1476
1477 /*
1ce4f28b 1478 * Find the device first to size check it
1da177e4
LT
1479 */
1480
de74e92a 1481 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1a35ca80 1482retry:
654d1f8a
ED
1483 rcu_read_lock();
1484 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1da177e4
LT
1485 err = -ENODEV;
1486 if (dev == NULL)
1487 goto out_unlock;
1ce4f28b 1488
d5e76b0a
DM
1489 err = -ENETDOWN;
1490 if (!(dev->flags & IFF_UP))
1491 goto out_unlock;
1492
1da177e4 1493 /*
40d4e3df
ED
1494 * You may not queue a frame bigger than the mtu. This is the lowest level
1495 * raw protocol and you must do your own fragmentation at this level.
1da177e4 1496 */
1ce4f28b 1497
3bdc0eba
BG
1498 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1499 if (!netif_supports_nofcs(dev)) {
1500 err = -EPROTONOSUPPORT;
1501 goto out_unlock;
1502 }
1503 extra_len = 4; /* We're doing our own CRC */
1504 }
1505
1da177e4 1506 err = -EMSGSIZE;
3bdc0eba 1507 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1da177e4
LT
1508 goto out_unlock;
1509
1a35ca80
ED
1510 if (!skb) {
1511 size_t reserved = LL_RESERVED_SPACE(dev);
4ce40912 1512 int tlen = dev->needed_tailroom;
1a35ca80
ED
1513 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1514
1515 rcu_read_unlock();
4ce40912 1516 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1a35ca80
ED
1517 if (skb == NULL)
1518 return -ENOBUFS;
1519 /* FIXME: Save some space for broken drivers that write a hard
1520 * header at transmission time by themselves. PPP is the notable
1521 * one here. This should really be fixed at the driver level.
1522 */
1523 skb_reserve(skb, reserved);
1524 skb_reset_network_header(skb);
1525
1526 /* Try to align data part correctly */
1527 if (hhlen) {
1528 skb->data -= hhlen;
1529 skb->tail -= hhlen;
1530 if (len < hhlen)
1531 skb_reset_network_header(skb);
1532 }
1533 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1534 if (err)
1535 goto out_free;
1536 goto retry;
1da177e4
LT
1537 }
1538
3bdc0eba 1539 if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
57f89bfa
BG
1540 /* Earlier code assumed this would be a VLAN pkt,
1541 * double-check this now that we have the actual
1542 * packet in hand.
1543 */
1544 struct ethhdr *ehdr;
1545 skb_reset_mac_header(skb);
1546 ehdr = eth_hdr(skb);
1547 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1548 err = -EMSGSIZE;
1549 goto out_unlock;
1550 }
1551 }
1a35ca80 1552
1da177e4
LT
1553 skb->protocol = proto;
1554 skb->dev = dev;
1555 skb->priority = sk->sk_priority;
2d37a186 1556 skb->mark = sk->sk_mark;
2244d07b 1557 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
ed85b565
RC
1558 if (err < 0)
1559 goto out_unlock;
1da177e4 1560
3bdc0eba
BG
1561 if (unlikely(extra_len == 4))
1562 skb->no_fcs = 1;
1563
1da177e4 1564 dev_queue_xmit(skb);
654d1f8a 1565 rcu_read_unlock();
40d4e3df 1566 return len;
1da177e4 1567
1da177e4 1568out_unlock:
654d1f8a 1569 rcu_read_unlock();
1a35ca80
ED
1570out_free:
1571 kfree_skb(skb);
1da177e4
LT
1572 return err;
1573}
1da177e4 1574
eea49cc9 1575static unsigned int run_filter(const struct sk_buff *skb,
62ab0812 1576 const struct sock *sk,
dbcb5855 1577 unsigned int res)
1da177e4
LT
1578{
1579 struct sk_filter *filter;
fda9ef5d 1580
80f8f102
ED
1581 rcu_read_lock();
1582 filter = rcu_dereference(sk->sk_filter);
dbcb5855 1583 if (filter != NULL)
0a14842f 1584 res = SK_RUN_FILTER(filter, skb);
80f8f102 1585 rcu_read_unlock();
1da177e4 1586
dbcb5855 1587 return res;
1da177e4
LT
1588}
1589
1590/*
62ab0812
ED
1591 * This function makes lazy skb cloning in hope that most of packets
1592 * are discarded by BPF.
1593 *
1594 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
1595 * and skb->cb are mangled. It works because (and until) packets
1596 * falling here are owned by current CPU. Output packets are cloned
1597 * by dev_queue_xmit_nit(), input packets are processed by net_bh
1598 * sequencially, so that if we return skb to original state on exit,
1599 * we will not harm anyone.
1da177e4
LT
1600 */
1601
40d4e3df
ED
1602static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1603 struct packet_type *pt, struct net_device *orig_dev)
1da177e4
LT
1604{
1605 struct sock *sk;
1606 struct sockaddr_ll *sll;
1607 struct packet_sock *po;
40d4e3df 1608 u8 *skb_head = skb->data;
1da177e4 1609 int skb_len = skb->len;
dbcb5855 1610 unsigned int snaplen, res;
1da177e4
LT
1611
1612 if (skb->pkt_type == PACKET_LOOPBACK)
1613 goto drop;
1614
1615 sk = pt->af_packet_priv;
1616 po = pkt_sk(sk);
1617
09ad9bc7 1618 if (!net_eq(dev_net(dev), sock_net(sk)))
d12d01d6
DL
1619 goto drop;
1620
1da177e4
LT
1621 skb->dev = dev;
1622
3b04ddde 1623 if (dev->header_ops) {
1da177e4 1624 /* The device has an explicit notion of ll header,
62ab0812
ED
1625 * exported to higher levels.
1626 *
1627 * Otherwise, the device hides details of its frame
1628 * structure, so that corresponding packet head is
1629 * never delivered to user.
1da177e4
LT
1630 */
1631 if (sk->sk_type != SOCK_DGRAM)
98e399f8 1632 skb_push(skb, skb->data - skb_mac_header(skb));
1da177e4
LT
1633 else if (skb->pkt_type == PACKET_OUTGOING) {
1634 /* Special case: outgoing packets have ll header at head */
bbe735e4 1635 skb_pull(skb, skb_network_offset(skb));
1da177e4
LT
1636 }
1637 }
1638
1639 snaplen = skb->len;
1640
dbcb5855
DM
1641 res = run_filter(skb, sk, snaplen);
1642 if (!res)
fda9ef5d 1643 goto drop_n_restore;
dbcb5855
DM
1644 if (snaplen > res)
1645 snaplen = res;
1da177e4 1646
0fd7bac6 1647 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1da177e4
LT
1648 goto drop_n_acct;
1649
1650 if (skb_shared(skb)) {
1651 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1652 if (nskb == NULL)
1653 goto drop_n_acct;
1654
1655 if (skb_head != skb->data) {
1656 skb->data = skb_head;
1657 skb->len = skb_len;
1658 }
abc4e4fa 1659 consume_skb(skb);
1da177e4
LT
1660 skb = nskb;
1661 }
1662
ffbc6111
HX
1663 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
1664 sizeof(skb->cb));
1665
1666 sll = &PACKET_SKB_CB(skb)->sa.ll;
1da177e4
LT
1667 sll->sll_family = AF_PACKET;
1668 sll->sll_hatype = dev->type;
1669 sll->sll_protocol = skb->protocol;
1670 sll->sll_pkttype = skb->pkt_type;
8032b464 1671 if (unlikely(po->origdev))
80feaacb
PWJ
1672 sll->sll_ifindex = orig_dev->ifindex;
1673 else
1674 sll->sll_ifindex = dev->ifindex;
1da177e4 1675
b95cce35 1676 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1da177e4 1677
ffbc6111 1678 PACKET_SKB_CB(skb)->origlen = skb->len;
8dc41944 1679
1da177e4
LT
1680 if (pskb_trim(skb, snaplen))
1681 goto drop_n_acct;
1682
1683 skb_set_owner_r(skb, sk);
1684 skb->dev = NULL;
adf30907 1685 skb_dst_drop(skb);
1da177e4 1686
84531c24
PO
1687 /* drop conntrack reference */
1688 nf_reset(skb);
1689
1da177e4
LT
1690 spin_lock(&sk->sk_receive_queue.lock);
1691 po->stats.tp_packets++;
3b885787 1692 skb->dropcount = atomic_read(&sk->sk_drops);
1da177e4
LT
1693 __skb_queue_tail(&sk->sk_receive_queue, skb);
1694 spin_unlock(&sk->sk_receive_queue.lock);
1695 sk->sk_data_ready(sk, skb->len);
1696 return 0;
1697
1698drop_n_acct:
7091fbd8
WB
1699 spin_lock(&sk->sk_receive_queue.lock);
1700 po->stats.tp_drops++;
1701 atomic_inc(&sk->sk_drops);
1702 spin_unlock(&sk->sk_receive_queue.lock);
1da177e4
LT
1703
1704drop_n_restore:
1705 if (skb_head != skb->data && skb_shared(skb)) {
1706 skb->data = skb_head;
1707 skb->len = skb_len;
1708 }
1709drop:
ead2ceb0 1710 consume_skb(skb);
1da177e4
LT
1711 return 0;
1712}
1713
40d4e3df
ED
1714static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1715 struct packet_type *pt, struct net_device *orig_dev)
1da177e4
LT
1716{
1717 struct sock *sk;
1718 struct packet_sock *po;
1719 struct sockaddr_ll *sll;
bbd6ef87
PM
1720 union {
1721 struct tpacket_hdr *h1;
1722 struct tpacket2_hdr *h2;
f6fb8f10 1723 struct tpacket3_hdr *h3;
bbd6ef87
PM
1724 void *raw;
1725 } h;
40d4e3df 1726 u8 *skb_head = skb->data;
1da177e4 1727 int skb_len = skb->len;
dbcb5855 1728 unsigned int snaplen, res;
f6fb8f10 1729 unsigned long status = TP_STATUS_USER;
bbd6ef87 1730 unsigned short macoff, netoff, hdrlen;
1da177e4 1731 struct sk_buff *copy_skb = NULL;
b7aa0bf7 1732 struct timeval tv;
bbd6ef87 1733 struct timespec ts;
614f60fa 1734 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
1da177e4
LT
1735
1736 if (skb->pkt_type == PACKET_LOOPBACK)
1737 goto drop;
1738
1739 sk = pt->af_packet_priv;
1740 po = pkt_sk(sk);
1741
09ad9bc7 1742 if (!net_eq(dev_net(dev), sock_net(sk)))
d12d01d6
DL
1743 goto drop;
1744
3b04ddde 1745 if (dev->header_ops) {
1da177e4 1746 if (sk->sk_type != SOCK_DGRAM)
98e399f8 1747 skb_push(skb, skb->data - skb_mac_header(skb));
1da177e4
LT
1748 else if (skb->pkt_type == PACKET_OUTGOING) {
1749 /* Special case: outgoing packets have ll header at head */
bbe735e4 1750 skb_pull(skb, skb_network_offset(skb));
1da177e4
LT
1751 }
1752 }
1753
8dc41944
HX
1754 if (skb->ip_summed == CHECKSUM_PARTIAL)
1755 status |= TP_STATUS_CSUMNOTREADY;
1756
1da177e4
LT
1757 snaplen = skb->len;
1758
dbcb5855
DM
1759 res = run_filter(skb, sk, snaplen);
1760 if (!res)
fda9ef5d 1761 goto drop_n_restore;
dbcb5855
DM
1762 if (snaplen > res)
1763 snaplen = res;
1da177e4
LT
1764
1765 if (sk->sk_type == SOCK_DGRAM) {
8913336a
PM
1766 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1767 po->tp_reserve;
1da177e4 1768 } else {
95c96174 1769 unsigned int maclen = skb_network_offset(skb);
bbd6ef87 1770 netoff = TPACKET_ALIGN(po->tp_hdrlen +
8913336a
PM
1771 (maclen < 16 ? 16 : maclen)) +
1772 po->tp_reserve;
1da177e4
LT
1773 macoff = netoff - maclen;
1774 }
f6fb8f10 1775 if (po->tp_version <= TPACKET_V2) {
1776 if (macoff + snaplen > po->rx_ring.frame_size) {
1777 if (po->copy_thresh &&
0fd7bac6 1778 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
f6fb8f10 1779 if (skb_shared(skb)) {
1780 copy_skb = skb_clone(skb, GFP_ATOMIC);
1781 } else {
1782 copy_skb = skb_get(skb);
1783 skb_head = skb->data;
1784 }
1785 if (copy_skb)
1786 skb_set_owner_r(copy_skb, sk);
1da177e4 1787 }
f6fb8f10 1788 snaplen = po->rx_ring.frame_size - macoff;
1789 if ((int)snaplen < 0)
1790 snaplen = 0;
1da177e4 1791 }
1da177e4 1792 }
1da177e4 1793 spin_lock(&sk->sk_receive_queue.lock);
f6fb8f10 1794 h.raw = packet_current_rx_frame(po, skb,
1795 TP_STATUS_KERNEL, (macoff+snaplen));
bbd6ef87 1796 if (!h.raw)
1da177e4 1797 goto ring_is_full;
f6fb8f10 1798 if (po->tp_version <= TPACKET_V2) {
1799 packet_increment_rx_head(po, &po->rx_ring);
1800 /*
1801 * LOSING will be reported till you read the stats,
1802 * because it's COR - Clear On Read.
1803 * Anyways, moving it for V1/V2 only as V3 doesn't need this
1804 * at packet level.
1805 */
1806 if (po->stats.tp_drops)
1807 status |= TP_STATUS_LOSING;
1808 }
1da177e4
LT
1809 po->stats.tp_packets++;
1810 if (copy_skb) {
1811 status |= TP_STATUS_COPY;
1812 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
1813 }
1da177e4
LT
1814 spin_unlock(&sk->sk_receive_queue.lock);
1815
bbd6ef87 1816 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
1da177e4 1817
bbd6ef87
PM
1818 switch (po->tp_version) {
1819 case TPACKET_V1:
1820 h.h1->tp_len = skb->len;
1821 h.h1->tp_snaplen = snaplen;
1822 h.h1->tp_mac = macoff;
1823 h.h1->tp_net = netoff;
614f60fa
SM
1824 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1825 && shhwtstamps->syststamp.tv64)
1826 tv = ktime_to_timeval(shhwtstamps->syststamp);
1827 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1828 && shhwtstamps->hwtstamp.tv64)
1829 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
1830 else if (skb->tstamp.tv64)
bbd6ef87
PM
1831 tv = ktime_to_timeval(skb->tstamp);
1832 else
1833 do_gettimeofday(&tv);
1834 h.h1->tp_sec = tv.tv_sec;
1835 h.h1->tp_usec = tv.tv_usec;
1836 hdrlen = sizeof(*h.h1);
1837 break;
1838 case TPACKET_V2:
1839 h.h2->tp_len = skb->len;
1840 h.h2->tp_snaplen = snaplen;
1841 h.h2->tp_mac = macoff;
1842 h.h2->tp_net = netoff;
614f60fa
SM
1843 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1844 && shhwtstamps->syststamp.tv64)
1845 ts = ktime_to_timespec(shhwtstamps->syststamp);
1846 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1847 && shhwtstamps->hwtstamp.tv64)
1848 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1849 else if (skb->tstamp.tv64)
bbd6ef87
PM
1850 ts = ktime_to_timespec(skb->tstamp);
1851 else
1852 getnstimeofday(&ts);
1853 h.h2->tp_sec = ts.tv_sec;
1854 h.h2->tp_nsec = ts.tv_nsec;
a3bcc23e
BG
1855 if (vlan_tx_tag_present(skb)) {
1856 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
1857 status |= TP_STATUS_VLAN_VALID;
1858 } else {
1859 h.h2->tp_vlan_tci = 0;
1860 }
13fcb7bd 1861 h.h2->tp_padding = 0;
bbd6ef87
PM
1862 hdrlen = sizeof(*h.h2);
1863 break;
f6fb8f10 1864 case TPACKET_V3:
1865 /* tp_next_offset and vlan are already populated above,
1866 * so don't clear those fields here.
1867 */
1868 h.h3->tp_status |= status;
1869 h.h3->tp_len = skb->len;
1870 h.h3->tp_snaplen = snaplen;
1871 h.h3->tp_mac = macoff;
1872 h.h3->tp_net = netoff;
1873 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
1874 && shhwtstamps->syststamp.tv64)
1875 ts = ktime_to_timespec(shhwtstamps->syststamp);
1876 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
1877 && shhwtstamps->hwtstamp.tv64)
1878 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
1879 else if (skb->tstamp.tv64)
1880 ts = ktime_to_timespec(skb->tstamp);
1881 else
1882 getnstimeofday(&ts);
1883 h.h3->tp_sec = ts.tv_sec;
1884 h.h3->tp_nsec = ts.tv_nsec;
1885 hdrlen = sizeof(*h.h3);
1886 break;
bbd6ef87
PM
1887 default:
1888 BUG();
1889 }
1da177e4 1890
bbd6ef87 1891 sll = h.raw + TPACKET_ALIGN(hdrlen);
b95cce35 1892 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
1da177e4
LT
1893 sll->sll_family = AF_PACKET;
1894 sll->sll_hatype = dev->type;
1895 sll->sll_protocol = skb->protocol;
1896 sll->sll_pkttype = skb->pkt_type;
8032b464 1897 if (unlikely(po->origdev))
80feaacb
PWJ
1898 sll->sll_ifindex = orig_dev->ifindex;
1899 else
1900 sll->sll_ifindex = dev->ifindex;
1da177e4 1901
e16aa207 1902 smp_mb();
f6dafa95 1903#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
1da177e4 1904 {
0af55bb5
CG
1905 u8 *start, *end;
1906
f6fb8f10 1907 if (po->tp_version <= TPACKET_V2) {
1908 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
1909 + macoff + snaplen);
1910 for (start = h.raw; start < end; start += PAGE_SIZE)
1911 flush_dcache_page(pgv_to_page(start));
1912 }
cc9f01b2 1913 smp_wmb();
1da177e4 1914 }
f6dafa95 1915#endif
f6fb8f10 1916 if (po->tp_version <= TPACKET_V2)
1917 __packet_set_status(po, h.raw, status);
1918 else
1919 prb_clear_blk_fill_status(&po->rx_ring);
1da177e4
LT
1920
1921 sk->sk_data_ready(sk, 0);
1922
1923drop_n_restore:
1924 if (skb_head != skb->data && skb_shared(skb)) {
1925 skb->data = skb_head;
1926 skb->len = skb_len;
1927 }
1928drop:
1ce4f28b 1929 kfree_skb(skb);
1da177e4
LT
1930 return 0;
1931
1932ring_is_full:
1933 po->stats.tp_drops++;
1934 spin_unlock(&sk->sk_receive_queue.lock);
1935
1936 sk->sk_data_ready(sk, 0);
acb5d75b 1937 kfree_skb(copy_skb);
1da177e4
LT
1938 goto drop_n_restore;
1939}
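/*
 * The receive path above copies each matching skb into the next free
 * frame of the mmapped RX ring, fills in the tpacket header for the
 * negotiated TPACKET version, appends a sockaddr_ll describing the
 * originating device and finally flips the frame status so user space
 * can see it.  A minimal userspace sketch of draining one TPACKET_V2
 * frame (illustrative only; "ring" is the unsigned char * returned by
 * mmap, "req" and the frame index "i" come from the caller's own
 * setup, handle_frame() is caller-supplied, and tp_block_size is
 * assumed to be an exact multiple of tp_frame_size so frames are
 * contiguous):
 *
 *	struct tpacket2_hdr *hdr;
 *
 *	hdr = (struct tpacket2_hdr *)(ring + i * req.tp_frame_size);
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		unsigned char *data = (unsigned char *)hdr + hdr->tp_mac;
 *
 *		handle_frame(data, hdr->tp_snaplen);	// caller-supplied
 *		hdr->tp_status = TP_STATUS_KERNEL;	// give frame back
 *		i = (i + 1) % req.tp_frame_nr;
 *	}
 */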
1940
69e3c75f
JB
1941static void tpacket_destruct_skb(struct sk_buff *skb)
1942{
1943 struct packet_sock *po = pkt_sk(skb->sk);
40d4e3df 1944 void *ph;
1da177e4 1945
69e3c75f
JB
1946 if (likely(po->tx_ring.pg_vec)) {
1947 ph = skb_shinfo(skb)->destructor_arg;
69e3c75f
JB
1948 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
1949 atomic_dec(&po->tx_ring.pending);
1950 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
1951 }
1952
1953 sock_wfree(skb);
1954}
1955
40d4e3df
ED
1956static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1957 void *frame, struct net_device *dev, int size_max,
ae641949 1958 __be16 proto, unsigned char *addr, int hlen)
69e3c75f
JB
1959{
1960 union {
1961 struct tpacket_hdr *h1;
1962 struct tpacket2_hdr *h2;
1963 void *raw;
1964 } ph;
1965 int to_write, offset, len, tp_len, nr_frags, len_max;
1966 struct socket *sock = po->sk.sk_socket;
1967 struct page *page;
1968 void *data;
1969 int err;
1970
1971 ph.raw = frame;
1972
1973 skb->protocol = proto;
1974 skb->dev = dev;
1975 skb->priority = po->sk.sk_priority;
2d37a186 1976 skb->mark = po->sk.sk_mark;
69e3c75f
JB
1977 skb_shinfo(skb)->destructor_arg = ph.raw;
1978
1979 switch (po->tp_version) {
1980 case TPACKET_V2:
1981 tp_len = ph.h2->tp_len;
1982 break;
1983 default:
1984 tp_len = ph.h1->tp_len;
1985 break;
1986 }
1987 if (unlikely(tp_len > size_max)) {
40d4e3df 1988 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
69e3c75f
JB
1989 return -EMSGSIZE;
1990 }
1991
ae641949 1992 skb_reserve(skb, hlen);
69e3c75f
JB
1993 skb_reset_network_header(skb);
1994
1995 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
1996 to_write = tp_len;
1997
1998 if (sock->type == SOCK_DGRAM) {
1999 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2000 NULL, tp_len);
2001 if (unlikely(err < 0))
2002 return -EINVAL;
40d4e3df 2003 } else if (dev->hard_header_len) {
69e3c75f
JB
2004 /* net device doesn't like empty head */
2005 if (unlikely(tp_len <= dev->hard_header_len)) {
40d4e3df
ED
2006 pr_err("packet size is too short (%d < %d)\n",
2007 tp_len, dev->hard_header_len);
69e3c75f
JB
2008 return -EINVAL;
2009 }
2010
2011 skb_push(skb, dev->hard_header_len);
2012 err = skb_store_bits(skb, 0, data,
2013 dev->hard_header_len);
2014 if (unlikely(err))
2015 return err;
2016
2017 data += dev->hard_header_len;
2018 to_write -= dev->hard_header_len;
2019 }
2020
2021 err = -EFAULT;
69e3c75f
JB
2022 offset = offset_in_page(data);
2023 len_max = PAGE_SIZE - offset;
2024 len = ((to_write > len_max) ? len_max : to_write);
2025
2026 skb->data_len = to_write;
2027 skb->len += to_write;
2028 skb->truesize += to_write;
2029 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2030
2031 while (likely(to_write)) {
2032 nr_frags = skb_shinfo(skb)->nr_frags;
2033
2034 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
40d4e3df
ED
2035 pr_err("Packet exceed the number of skb frags(%lu)\n",
2036 MAX_SKB_FRAGS);
69e3c75f
JB
2037 return -EFAULT;
2038 }
2039
0af55bb5
CG
2040 page = pgv_to_page(data);
2041 data += len;
69e3c75f
JB
2042 flush_dcache_page(page);
2043 get_page(page);
0af55bb5 2044 skb_fill_page_desc(skb, nr_frags, page, offset, len);
69e3c75f
JB
2045 to_write -= len;
2046 offset = 0;
2047 len_max = PAGE_SIZE;
2048 len = ((to_write > len_max) ? len_max : to_write);
2049 }
2050
2051 return tp_len;
2052}
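/*
 * tpacket_fill_skb() above turns one TX ring frame into an skb without
 * copying the payload: the ring pages backing the frame are attached
 * to the skb as page fragments via skb_fill_page_desc(), so only the
 * link-layer header (built by dev_hard_header() for SOCK_DGRAM or
 * copied from the frame for SOCK_RAW) lands in the linear area.  The
 * frame must therefore stay untouched by user space until the skb
 * destructor marks it TP_STATUS_AVAILABLE again.
 */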
2053
2054static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2055{
69e3c75f
JB
2056 struct sk_buff *skb;
2057 struct net_device *dev;
2058 __be16 proto;
827d9780
BG
2059 bool need_rls_dev = false;
2060 int err, reserve = 0;
40d4e3df
ED
2061 void *ph;
2062 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
69e3c75f
JB
2063 int tp_len, size_max;
2064 unsigned char *addr;
2065 int len_sum = 0;
2066 int status = 0;
ae641949 2067 int hlen, tlen;
69e3c75f 2068
69e3c75f
JB
2069 mutex_lock(&po->pg_vec_lock);
2070
2071 err = -EBUSY;
2072 if (saddr == NULL) {
827d9780 2073 dev = po->prot_hook.dev;
69e3c75f
JB
2074 proto = po->num;
2075 addr = NULL;
2076 } else {
2077 err = -EINVAL;
2078 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2079 goto out;
2080 if (msg->msg_namelen < (saddr->sll_halen
2081 + offsetof(struct sockaddr_ll,
2082 sll_addr)))
2083 goto out;
69e3c75f
JB
2084 proto = saddr->sll_protocol;
2085 addr = saddr->sll_addr;
827d9780
BG
2086 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2087 need_rls_dev = true;
69e3c75f
JB
2088 }
2089
69e3c75f
JB
2090 err = -ENXIO;
2091 if (unlikely(dev == NULL))
2092 goto out;
2093
2094 reserve = dev->hard_header_len;
2095
2096 err = -ENETDOWN;
2097 if (unlikely(!(dev->flags & IFF_UP)))
2098 goto out_put;
2099
2100 size_max = po->tx_ring.frame_size
b5dd884e 2101 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
69e3c75f
JB
2102
2103 if (size_max > dev->mtu + reserve)
2104 size_max = dev->mtu + reserve;
2105
2106 do {
2107 ph = packet_current_frame(po, &po->tx_ring,
2108 TP_STATUS_SEND_REQUEST);
2109
2110 if (unlikely(ph == NULL)) {
2111 schedule();
2112 continue;
2113 }
2114
2115 status = TP_STATUS_SEND_REQUEST;
ae641949
HX
2116 hlen = LL_RESERVED_SPACE(dev);
2117 tlen = dev->needed_tailroom;
69e3c75f 2118 skb = sock_alloc_send_skb(&po->sk,
ae641949 2119 hlen + tlen + sizeof(struct sockaddr_ll),
69e3c75f
JB
2120 0, &err);
2121
2122 if (unlikely(skb == NULL))
2123 goto out_status;
2124
2125 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
ae641949 2126 addr, hlen);
69e3c75f
JB
2127
2128 if (unlikely(tp_len < 0)) {
2129 if (po->tp_loss) {
2130 __packet_set_status(po, ph,
2131 TP_STATUS_AVAILABLE);
2132 packet_increment_head(&po->tx_ring);
2133 kfree_skb(skb);
2134 continue;
2135 } else {
2136 status = TP_STATUS_WRONG_FORMAT;
2137 err = tp_len;
2138 goto out_status;
2139 }
2140 }
2141
2142 skb->destructor = tpacket_destruct_skb;
2143 __packet_set_status(po, ph, TP_STATUS_SENDING);
2144 atomic_inc(&po->tx_ring.pending);
2145
2146 status = TP_STATUS_SEND_REQUEST;
2147 err = dev_queue_xmit(skb);
eb70df13
JP
2148 if (unlikely(err > 0)) {
2149 err = net_xmit_errno(err);
2150 if (err && __packet_get_status(po, ph) ==
2151 TP_STATUS_AVAILABLE) {
2152 /* skb was destructed already */
2153 skb = NULL;
2154 goto out_status;
2155 }
2156 /*
2157 * skb was dropped but not destructed yet;
2158 * let's treat it like congestion or err < 0
2159 */
2160 err = 0;
2161 }
69e3c75f
JB
2162 packet_increment_head(&po->tx_ring);
2163 len_sum += tp_len;
f64f9e71
JP
2164 } while (likely((ph != NULL) ||
2165 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2166 (atomic_read(&po->tx_ring.pending))))
2167 );
69e3c75f
JB
2168
2169 err = len_sum;
2170 goto out_put;
2171
69e3c75f
JB
2172out_status:
2173 __packet_set_status(po, ph, status);
2174 kfree_skb(skb);
2175out_put:
827d9780
BG
2176 if (need_rls_dev)
2177 dev_put(dev);
69e3c75f
JB
2178out:
2179 mutex_unlock(&po->pg_vec_lock);
2180 return err;
2181}
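/*
 * tpacket_snd() above walks the TX ring looking for frames that user
 * space has marked TP_STATUS_SEND_REQUEST, builds an skb around each
 * one and hands it to dev_queue_xmit().  A minimal userspace sketch of
 * queueing one packet on a TPACKET_V2 TX ring of a SOCK_RAW socket
 * (illustrative only; "txring" is the unsigned char * mapping of the
 * TX ring, "treq", "slot", "pkt" and "pkt_len" come from the caller's
 * own setup, and "fd" is the packet socket):
 *
 *	struct tpacket2_hdr *hdr;
 *	unsigned char *data;
 *
 *	hdr = (struct tpacket2_hdr *)(txring + slot * treq.tp_frame_size);
 *	data = (unsigned char *)hdr + TPACKET2_HDRLEN -
 *	       sizeof(struct sockaddr_ll);
 *	memcpy(data, pkt, pkt_len);		// full frame incl. L2 header
 *	hdr->tp_len = pkt_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);			// kick the kernel TX loop
 */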
69e3c75f 2182
eea49cc9
OJ
2183static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2184 size_t reserve, size_t len,
2185 size_t linear, int noblock,
2186 int *err)
bfd5f4a3
SS
2187{
2188 struct sk_buff *skb;
2189
2190 /* Under a page? Don't bother with paged skb. */
2191 if (prepad + len < PAGE_SIZE || !linear)
2192 linear = len;
2193
2194 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2195 err);
2196 if (!skb)
2197 return NULL;
2198
2199 skb_reserve(skb, reserve);
2200 skb_put(skb, linear);
2201 skb->data_len = len - linear;
2202 skb->len += len - linear;
2203
2204 return skb;
2205}
2206
69e3c75f 2207static int packet_snd(struct socket *sock,
1da177e4
LT
2208 struct msghdr *msg, size_t len)
2209{
2210 struct sock *sk = sock->sk;
40d4e3df 2211 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
1da177e4
LT
2212 struct sk_buff *skb;
2213 struct net_device *dev;
0e11c91e 2214 __be16 proto;
827d9780 2215 bool need_rls_dev = false;
1da177e4 2216 unsigned char *addr;
827d9780 2217 int err, reserve = 0;
bfd5f4a3
SS
2218 struct virtio_net_hdr vnet_hdr = { 0 };
2219 int offset = 0;
2220 int vnet_hdr_len;
2221 struct packet_sock *po = pkt_sk(sk);
2222 unsigned short gso_type = 0;
ae641949 2223 int hlen, tlen;
3bdc0eba 2224 int extra_len = 0;
1da177e4
LT
2225
2226 /*
1ce4f28b 2227 * Get and verify the address.
1da177e4 2228 */
1ce4f28b 2229
1da177e4 2230 if (saddr == NULL) {
827d9780 2231 dev = po->prot_hook.dev;
1da177e4
LT
2232 proto = po->num;
2233 addr = NULL;
2234 } else {
2235 err = -EINVAL;
2236 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2237 goto out;
0fb375fb
EB
2238 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2239 goto out;
1da177e4
LT
2240 proto = saddr->sll_protocol;
2241 addr = saddr->sll_addr;
827d9780
BG
2242 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2243 need_rls_dev = true;
1da177e4
LT
2244 }
2245
1da177e4
LT
2246 err = -ENXIO;
2247 if (dev == NULL)
2248 goto out_unlock;
2249 if (sock->type == SOCK_RAW)
2250 reserve = dev->hard_header_len;
2251
d5e76b0a
DM
2252 err = -ENETDOWN;
2253 if (!(dev->flags & IFF_UP))
2254 goto out_unlock;
2255
bfd5f4a3
SS
2256 if (po->has_vnet_hdr) {
2257 vnet_hdr_len = sizeof(vnet_hdr);
2258
2259 err = -EINVAL;
2260 if (len < vnet_hdr_len)
2261 goto out_unlock;
2262
2263 len -= vnet_hdr_len;
2264
2265 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
2266 vnet_hdr_len);
2267 if (err < 0)
2268 goto out_unlock;
2269
2270 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2271 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
2272 vnet_hdr.hdr_len))
2273 vnet_hdr.hdr_len = vnet_hdr.csum_start +
2274 vnet_hdr.csum_offset + 2;
2275
2276 err = -EINVAL;
2277 if (vnet_hdr.hdr_len > len)
2278 goto out_unlock;
2279
2280 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2281 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2282 case VIRTIO_NET_HDR_GSO_TCPV4:
2283 gso_type = SKB_GSO_TCPV4;
2284 break;
2285 case VIRTIO_NET_HDR_GSO_TCPV6:
2286 gso_type = SKB_GSO_TCPV6;
2287 break;
2288 case VIRTIO_NET_HDR_GSO_UDP:
2289 gso_type = SKB_GSO_UDP;
2290 break;
2291 default:
2292 goto out_unlock;
2293 }
2294
2295 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2296 gso_type |= SKB_GSO_TCP_ECN;
2297
2298 if (vnet_hdr.gso_size == 0)
2299 goto out_unlock;
2300
2301 }
2302 }
2303
3bdc0eba
BG
2304 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2305 if (!netif_supports_nofcs(dev)) {
2306 err = -EPROTONOSUPPORT;
2307 goto out_unlock;
2308 }
2309 extra_len = 4; /* We're doing our own CRC */
2310 }
2311
1da177e4 2312 err = -EMSGSIZE;
3bdc0eba 2313 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
1da177e4
LT
2314 goto out_unlock;
2315
bfd5f4a3 2316 err = -ENOBUFS;
ae641949
HX
2317 hlen = LL_RESERVED_SPACE(dev);
2318 tlen = dev->needed_tailroom;
2319 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
bfd5f4a3 2320 msg->msg_flags & MSG_DONTWAIT, &err);
40d4e3df 2321 if (skb == NULL)
1da177e4
LT
2322 goto out_unlock;
2323
bfd5f4a3 2324 skb_set_network_header(skb, reserve);
1da177e4 2325
0c4e8581
SH
2326 err = -EINVAL;
2327 if (sock->type == SOCK_DGRAM &&
bfd5f4a3 2328 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
0c4e8581 2329 goto out_free;
1da177e4
LT
2330
2331 /* Returns -EFAULT on error */
bfd5f4a3 2332 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
1da177e4
LT
2333 if (err)
2334 goto out_free;
2244d07b 2335 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
ed85b565
RC
2336 if (err < 0)
2337 goto out_free;
1da177e4 2338
3bdc0eba 2339 if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
57f89bfa
BG
2340 /* Earlier code assumed this would be a VLAN pkt,
2341 * double-check this now that we have the actual
2342 * packet in hand.
2343 */
2344 struct ethhdr *ehdr;
2345 skb_reset_mac_header(skb);
2346 ehdr = eth_hdr(skb);
2347 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2348 err = -EMSGSIZE;
2349 goto out_free;
2350 }
2351 }
2352
1da177e4
LT
2353 skb->protocol = proto;
2354 skb->dev = dev;
2355 skb->priority = sk->sk_priority;
2d37a186 2356 skb->mark = sk->sk_mark;
1da177e4 2357
bfd5f4a3
SS
2358 if (po->has_vnet_hdr) {
2359 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2360 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
2361 vnet_hdr.csum_offset)) {
2362 err = -EINVAL;
2363 goto out_free;
2364 }
2365 }
2366
2367 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
2368 skb_shinfo(skb)->gso_type = gso_type;
2369
2370 /* Header must be checked, and gso_segs computed. */
2371 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2372 skb_shinfo(skb)->gso_segs = 0;
2373
2374 len += vnet_hdr_len;
2375 }
2376
3bdc0eba
BG
2377 if (unlikely(extra_len == 4))
2378 skb->no_fcs = 1;
2379
1da177e4
LT
2380 /*
2381 * Now send it
2382 */
2383
2384 err = dev_queue_xmit(skb);
2385 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2386 goto out_unlock;
2387
827d9780
BG
2388 if (need_rls_dev)
2389 dev_put(dev);
1da177e4 2390
40d4e3df 2391 return len;
1da177e4
LT
2392
2393out_free:
2394 kfree_skb(skb);
2395out_unlock:
827d9780 2396 if (dev && need_rls_dev)
1da177e4
LT
2397 dev_put(dev);
2398out:
2399 return err;
2400}
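/*
 * packet_snd() above is the non-ring transmit path: it resolves the
 * outgoing device either from a sockaddr_ll passed in msg_name or from
 * the device the socket is bound to, allocates an skb, builds the hard
 * header for SOCK_DGRAM sockets and copies the user data in.  A
 * minimal userspace sketch, assuming a SOCK_DGRAM packet socket "fd",
 * a destination MAC "dst_mac", a payload "buf"/"len" supplied by the
 * caller and "eth0" as an example interface name:
 *
 *	struct sockaddr_ll sll = {0};
 *
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_IP);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	sll.sll_halen    = ETH_ALEN;
 *	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&sll, sizeof(sll));
 */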
2401
69e3c75f
JB
2402static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
2403 struct msghdr *msg, size_t len)
2404{
69e3c75f
JB
2405 struct sock *sk = sock->sk;
2406 struct packet_sock *po = pkt_sk(sk);
2407 if (po->tx_ring.pg_vec)
2408 return tpacket_snd(po, msg);
2409 else
69e3c75f
JB
2410 return packet_snd(sock, msg, len);
2411}
2412
1da177e4
LT
2413/*
2414 * Close a PACKET socket. This is fairly simple. We immediately go
2415 * to 'closed' state and remove our protocol entry in the device list.
2416 */
2417
2418static int packet_release(struct socket *sock)
2419{
2420 struct sock *sk = sock->sk;
2421 struct packet_sock *po;
d12d01d6 2422 struct net *net;
f6fb8f10 2423 union tpacket_req_u req_u;
1da177e4
LT
2424
2425 if (!sk)
2426 return 0;
2427
3b1e0a65 2428 net = sock_net(sk);
1da177e4
LT
2429 po = pkt_sk(sk);
2430
808f5114 2431 spin_lock_bh(&net->packet.sklist_lock);
2432 sk_del_node_init_rcu(sk);
920de804 2433 sock_prot_inuse_add(net, sk->sk_prot, -1);
808f5114 2434 spin_unlock_bh(&net->packet.sklist_lock);
1da177e4 2435
808f5114 2436 spin_lock(&po->bind_lock);
ce06b03e 2437 unregister_prot_hook(sk, false);
160ff18a
BG
2438 if (po->prot_hook.dev) {
2439 dev_put(po->prot_hook.dev);
2440 po->prot_hook.dev = NULL;
2441 }
808f5114 2442 spin_unlock(&po->bind_lock);
1da177e4 2443
1da177e4 2444 packet_flush_mclist(sk);
1da177e4 2445
f6fb8f10 2446 memset(&req_u, 0, sizeof(req_u));
69e3c75f
JB
2447
2448 if (po->rx_ring.pg_vec)
f6fb8f10 2449 packet_set_ring(sk, &req_u, 1, 0);
69e3c75f
JB
2450
2451 if (po->tx_ring.pg_vec)
f6fb8f10 2452 packet_set_ring(sk, &req_u, 1, 1);
1da177e4 2453
dc99f600
DM
2454 fanout_release(sk);
2455
808f5114 2456 synchronize_net();
1da177e4
LT
2457 /*
2458 * Now the socket is dead. No more input will appear.
2459 */
1da177e4
LT
2460 sock_orphan(sk);
2461 sock->sk = NULL;
2462
2463 /* Purge queues */
2464
2465 skb_queue_purge(&sk->sk_receive_queue);
17ab56a2 2466 sk_refcnt_debug_release(sk);
1da177e4
LT
2467
2468 sock_put(sk);
2469 return 0;
2470}
2471
2472/*
2473 * Attach a packet hook.
2474 */
2475
0e11c91e 2476static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
1da177e4
LT
2477{
2478 struct packet_sock *po = pkt_sk(sk);
dc99f600 2479
aef950b4
WY
2480 if (po->fanout) {
2481 if (dev)
2482 dev_put(dev);
2483
dc99f600 2484 return -EINVAL;
aef950b4 2485 }
1da177e4
LT
2486
2487 lock_sock(sk);
2488
2489 spin_lock(&po->bind_lock);
ce06b03e 2490 unregister_prot_hook(sk, true);
1da177e4
LT
2491 po->num = protocol;
2492 po->prot_hook.type = protocol;
160ff18a
BG
2493 if (po->prot_hook.dev)
2494 dev_put(po->prot_hook.dev);
1da177e4
LT
2495 po->prot_hook.dev = dev;
2496
2497 po->ifindex = dev ? dev->ifindex : 0;
2498
2499 if (protocol == 0)
2500 goto out_unlock;
2501
be85d4ad 2502 if (!dev || (dev->flags & IFF_UP)) {
ce06b03e 2503 register_prot_hook(sk);
be85d4ad
UT
2504 } else {
2505 sk->sk_err = ENETDOWN;
2506 if (!sock_flag(sk, SOCK_DEAD))
2507 sk->sk_error_report(sk);
1da177e4
LT
2508 }
2509
2510out_unlock:
2511 spin_unlock(&po->bind_lock);
2512 release_sock(sk);
2513 return 0;
2514}
2515
2516/*
2517 * Bind a packet socket to a device
2518 */
2519
40d4e3df
ED
2520static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
2521 int addr_len)
1da177e4 2522{
40d4e3df 2523 struct sock *sk = sock->sk;
1da177e4
LT
2524 char name[15];
2525 struct net_device *dev;
2526 int err = -ENODEV;
1ce4f28b 2527
1da177e4
LT
2528 /*
2529 * Check legality
2530 */
1ce4f28b 2531
8ae55f04 2532 if (addr_len != sizeof(struct sockaddr))
1da177e4 2533 return -EINVAL;
40d4e3df 2534 strlcpy(name, uaddr->sa_data, sizeof(name));
1da177e4 2535
3b1e0a65 2536 dev = dev_get_by_name(sock_net(sk), name);
160ff18a 2537 if (dev)
1da177e4 2538 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
1da177e4
LT
2539 return err;
2540}
1da177e4
LT
2541
2542static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2543{
40d4e3df
ED
2544 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
2545 struct sock *sk = sock->sk;
1da177e4
LT
2546 struct net_device *dev = NULL;
2547 int err;
2548
2549
2550 /*
2551 * Check legality
2552 */
1ce4f28b 2553
1da177e4
LT
2554 if (addr_len < sizeof(struct sockaddr_ll))
2555 return -EINVAL;
2556 if (sll->sll_family != AF_PACKET)
2557 return -EINVAL;
2558
2559 if (sll->sll_ifindex) {
2560 err = -ENODEV;
3b1e0a65 2561 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
1da177e4
LT
2562 if (dev == NULL)
2563 goto out;
2564 }
2565 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
1da177e4
LT
2566
2567out:
2568 return err;
2569}
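/*
 * A minimal userspace sketch of binding a packet socket to one
 * interface via the sockaddr_ll path handled above (illustrative only;
 * "fd" is an already created AF_PACKET socket and "eth0" is just an
 * example interface name):
 *
 *	struct sockaddr_ll sll = {0};
 *
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */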
2570
2571static struct proto packet_proto = {
2572 .name = "PACKET",
2573 .owner = THIS_MODULE,
2574 .obj_size = sizeof(struct packet_sock),
2575};
2576
2577/*
1ce4f28b 2578 * Create a packet socket of the requested type.
1da177e4
LT
2579 */
2580
3f378b68
EP
2581static int packet_create(struct net *net, struct socket *sock, int protocol,
2582 int kern)
1da177e4
LT
2583{
2584 struct sock *sk;
2585 struct packet_sock *po;
0e11c91e 2586 __be16 proto = (__force __be16)protocol; /* weird, but documented */
1da177e4
LT
2587 int err;
2588
2589 if (!capable(CAP_NET_RAW))
2590 return -EPERM;
be02097c
DM
2591 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
2592 sock->type != SOCK_PACKET)
1da177e4
LT
2593 return -ESOCKTNOSUPPORT;
2594
2595 sock->state = SS_UNCONNECTED;
2596
2597 err = -ENOBUFS;
6257ff21 2598 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
1da177e4
LT
2599 if (sk == NULL)
2600 goto out;
2601
2602 sock->ops = &packet_ops;
1da177e4
LT
2603 if (sock->type == SOCK_PACKET)
2604 sock->ops = &packet_ops_spkt;
be02097c 2605
1da177e4
LT
2606 sock_init_data(sock, sk);
2607
2608 po = pkt_sk(sk);
2609 sk->sk_family = PF_PACKET;
0e11c91e 2610 po->num = proto;
1da177e4
LT
2611
2612 sk->sk_destruct = packet_sock_destruct;
17ab56a2 2613 sk_refcnt_debug_inc(sk);
1da177e4
LT
2614
2615 /*
2616 * Attach a protocol block
2617 */
2618
2619 spin_lock_init(&po->bind_lock);
905db440 2620 mutex_init(&po->pg_vec_lock);
1da177e4 2621 po->prot_hook.func = packet_rcv;
be02097c 2622
1da177e4
LT
2623 if (sock->type == SOCK_PACKET)
2624 po->prot_hook.func = packet_rcv_spkt;
be02097c 2625
1da177e4
LT
2626 po->prot_hook.af_packet_priv = sk;
2627
0e11c91e
AV
2628 if (proto) {
2629 po->prot_hook.type = proto;
ce06b03e 2630 register_prot_hook(sk);
1da177e4
LT
2631 }
2632
808f5114 2633 spin_lock_bh(&net->packet.sklist_lock);
2634 sk_add_node_rcu(sk, &net->packet.sklist);
3680453c 2635 sock_prot_inuse_add(net, &packet_proto, 1);
808f5114 2636 spin_unlock_bh(&net->packet.sklist_lock);
2637
40d4e3df 2638 return 0;
1da177e4
LT
2639out:
2640 return err;
2641}
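/*
 * packet_create() above is reached from socket(2); it requires
 * CAP_NET_RAW and only admits SOCK_DGRAM, SOCK_RAW and SOCK_PACKET.
 * A minimal userspace sketch (the protocol argument is passed in
 * network byte order; ETH_P_ALL is just an example):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	if (fd < 0)
 *		perror("socket");
 */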
2642
ed85b565
RC
2643static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
2644{
2645 struct sock_exterr_skb *serr;
2646 struct sk_buff *skb, *skb2;
2647 int copied, err;
2648
2649 err = -EAGAIN;
2650 skb = skb_dequeue(&sk->sk_error_queue);
2651 if (skb == NULL)
2652 goto out;
2653
2654 copied = skb->len;
2655 if (copied > len) {
2656 msg->msg_flags |= MSG_TRUNC;
2657 copied = len;
2658 }
2659 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2660 if (err)
2661 goto out_free_skb;
2662
2663 sock_recv_timestamp(msg, sk, skb);
2664
2665 serr = SKB_EXT_ERR(skb);
2666 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
2667 sizeof(serr->ee), &serr->ee);
2668
2669 msg->msg_flags |= MSG_ERRQUEUE;
2670 err = copied;
2671
2672 /* Reset and regenerate socket error */
2673 spin_lock_bh(&sk->sk_error_queue.lock);
2674 sk->sk_err = 0;
2675 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
2676 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
2677 spin_unlock_bh(&sk->sk_error_queue.lock);
2678 sk->sk_error_report(sk);
2679 } else
2680 spin_unlock_bh(&sk->sk_error_queue.lock);
2681
2682out_free_skb:
2683 kfree_skb(skb);
2684out:
2685 return err;
2686}
2687
1da177e4
LT
2688/*
2689 * Pull a packet from our receive queue and hand it to the user.
2690 * If necessary we block.
2691 */
2692
2693static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
2694 struct msghdr *msg, size_t len, int flags)
2695{
2696 struct sock *sk = sock->sk;
2697 struct sk_buff *skb;
2698 int copied, err;
0fb375fb 2699 struct sockaddr_ll *sll;
bfd5f4a3 2700 int vnet_hdr_len = 0;
1da177e4
LT
2701
2702 err = -EINVAL;
ed85b565 2703 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
1da177e4
LT
2704 goto out;
2705
2706#if 0
2707 /* What error should we return now? EUNATTACH? */
2708 if (pkt_sk(sk)->ifindex < 0)
2709 return -ENODEV;
2710#endif
2711
ed85b565
RC
2712 if (flags & MSG_ERRQUEUE) {
2713 err = packet_recv_error(sk, msg, len);
2714 goto out;
2715 }
2716
1da177e4
LT
2717 /*
2718 * Call the generic datagram receiver. This handles all sorts
2719 * of horrible races and re-entrancy so we can forget about it
2720 * in the protocol layers.
2721 *
2722 * Now it will return ENETDOWN if the device has just gone down,
2723 * but then it will block.
2724 */
2725
40d4e3df 2726 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
1da177e4
LT
2727
2728 /*
1ce4f28b 2729 * An error occurred, so return it. Because skb_recv_datagram()
1da177e4
LT
2730 * handles the blocking, we don't need to see or worry about
2731 * blocking retries.
2732 */
2733
8ae55f04 2734 if (skb == NULL)
1da177e4
LT
2735 goto out;
2736
bfd5f4a3
SS
2737 if (pkt_sk(sk)->has_vnet_hdr) {
2738 struct virtio_net_hdr vnet_hdr = { 0 };
2739
2740 err = -EINVAL;
2741 vnet_hdr_len = sizeof(vnet_hdr);
1f18b717 2742 if (len < vnet_hdr_len)
bfd5f4a3
SS
2743 goto out_free;
2744
1f18b717
MK
2745 len -= vnet_hdr_len;
2746
bfd5f4a3
SS
2747 if (skb_is_gso(skb)) {
2748 struct skb_shared_info *sinfo = skb_shinfo(skb);
2749
2750 /* This is a hint as to how much should be linear. */
2751 vnet_hdr.hdr_len = skb_headlen(skb);
2752 vnet_hdr.gso_size = sinfo->gso_size;
2753 if (sinfo->gso_type & SKB_GSO_TCPV4)
2754 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2755 else if (sinfo->gso_type & SKB_GSO_TCPV6)
2756 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2757 else if (sinfo->gso_type & SKB_GSO_UDP)
2758 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
2759 else if (sinfo->gso_type & SKB_GSO_FCOE)
2760 goto out_free;
2761 else
2762 BUG();
2763 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
2764 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2765 } else
2766 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
2767
2768 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2769 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
55508d60 2770 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
bfd5f4a3 2771 vnet_hdr.csum_offset = skb->csum_offset;
10a8d94a
JW
2772 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2773 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
bfd5f4a3
SS
2774 } /* else everything is zero */
2775
2776 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
2777 vnet_hdr_len);
2778 if (err < 0)
2779 goto out_free;
2780 }
2781
0fb375fb
EB
2782 /*
2783 * If the address length field is there to be filled in, we fill
2784 * it in now.
2785 */
2786
ffbc6111 2787 sll = &PACKET_SKB_CB(skb)->sa.ll;
0fb375fb
EB
2788 if (sock->type == SOCK_PACKET)
2789 msg->msg_namelen = sizeof(struct sockaddr_pkt);
2790 else
2791 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
2792
1da177e4
LT
2793 /*
2794 * You lose any data beyond the buffer you gave. If this worries a
2795 * user program, it can ask the device for its MTU anyway.
2796 */
2797
2798 copied = skb->len;
40d4e3df
ED
2799 if (copied > len) {
2800 copied = len;
2801 msg->msg_flags |= MSG_TRUNC;
1da177e4
LT
2802 }
2803
2804 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2805 if (err)
2806 goto out_free;
2807
3b885787 2808 sock_recv_ts_and_drops(msg, sk, skb);
1da177e4
LT
2809
2810 if (msg->msg_name)
ffbc6111
HX
2811 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
2812 msg->msg_namelen);
1da177e4 2813
8dc41944 2814 if (pkt_sk(sk)->auxdata) {
ffbc6111
HX
2815 struct tpacket_auxdata aux;
2816
2817 aux.tp_status = TP_STATUS_USER;
2818 if (skb->ip_summed == CHECKSUM_PARTIAL)
2819 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
2820 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
2821 aux.tp_snaplen = skb->len;
2822 aux.tp_mac = 0;
bbe735e4 2823 aux.tp_net = skb_network_offset(skb);
a3bcc23e
BG
2824 if (vlan_tx_tag_present(skb)) {
2825 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
2826 aux.tp_status |= TP_STATUS_VLAN_VALID;
2827 } else {
2828 aux.tp_vlan_tci = 0;
2829 }
13fcb7bd 2830 aux.tp_padding = 0;
ffbc6111 2831 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
8dc41944
HX
2832 }
2833
1da177e4
LT
2834 /*
2835 * Free or return the buffer as appropriate. Again this
2836 * hides all the races and re-entrancy issues from us.
2837 */
bfd5f4a3 2838 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
1da177e4
LT
2839
2840out_free:
2841 skb_free_datagram(sk, skb);
2842out:
2843 return err;
2844}
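/*
 * When the PACKET_AUXDATA option is enabled, packet_recvmsg() above
 * attaches a struct tpacket_auxdata control message to every packet.
 * A minimal userspace sketch of reading it back (illustrative only;
 * "fd" is the packet socket and the buffer sizes are arbitrary):
 *
 *	char buf[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg;
 *
 *	if (recvmsg(fd, &msg, 0) < 0)
 *		return;
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *			// aux->tp_snaplen, aux->tp_vlan_tci, ...
 *		}
 */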
2845
1da177e4
LT
2846static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2847 int *uaddr_len, int peer)
2848{
2849 struct net_device *dev;
2850 struct sock *sk = sock->sk;
2851
2852 if (peer)
2853 return -EOPNOTSUPP;
2854
2855 uaddr->sa_family = AF_PACKET;
654d1f8a
ED
2856 rcu_read_lock();
2857 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2858 if (dev)
67286640 2859 strncpy(uaddr->sa_data, dev->name, 14);
654d1f8a 2860 else
1da177e4 2861 memset(uaddr->sa_data, 0, 14);
654d1f8a 2862 rcu_read_unlock();
1da177e4
LT
2863 *uaddr_len = sizeof(*uaddr);
2864
2865 return 0;
2866}
1da177e4
LT
2867
2868static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2869 int *uaddr_len, int peer)
2870{
2871 struct net_device *dev;
2872 struct sock *sk = sock->sk;
2873 struct packet_sock *po = pkt_sk(sk);
13cfa97b 2874 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
1da177e4
LT
2875
2876 if (peer)
2877 return -EOPNOTSUPP;
2878
2879 sll->sll_family = AF_PACKET;
2880 sll->sll_ifindex = po->ifindex;
2881 sll->sll_protocol = po->num;
67286640 2882 sll->sll_pkttype = 0;
654d1f8a
ED
2883 rcu_read_lock();
2884 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1da177e4
LT
2885 if (dev) {
2886 sll->sll_hatype = dev->type;
2887 sll->sll_halen = dev->addr_len;
2888 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
2889 } else {
2890 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
2891 sll->sll_halen = 0;
2892 }
654d1f8a 2893 rcu_read_unlock();
0fb375fb 2894 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
1da177e4
LT
2895
2896 return 0;
2897}
2898
2aeb0b88
WC
2899static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
2900 int what)
1da177e4
LT
2901{
2902 switch (i->type) {
2903 case PACKET_MR_MULTICAST:
1162563f
JP
2904 if (i->alen != dev->addr_len)
2905 return -EINVAL;
1da177e4 2906 if (what > 0)
22bedad3 2907 return dev_mc_add(dev, i->addr);
1da177e4 2908 else
22bedad3 2909 return dev_mc_del(dev, i->addr);
1da177e4
LT
2910 break;
2911 case PACKET_MR_PROMISC:
2aeb0b88 2912 return dev_set_promiscuity(dev, what);
1da177e4
LT
2913 break;
2914 case PACKET_MR_ALLMULTI:
2aeb0b88 2915 return dev_set_allmulti(dev, what);
1da177e4 2916 break;
d95ed927 2917 case PACKET_MR_UNICAST:
1162563f
JP
2918 if (i->alen != dev->addr_len)
2919 return -EINVAL;
d95ed927 2920 if (what > 0)
a748ee24 2921 return dev_uc_add(dev, i->addr);
d95ed927 2922 else
a748ee24 2923 return dev_uc_del(dev, i->addr);
d95ed927 2924 break;
40d4e3df
ED
2925 default:
2926 break;
1da177e4 2927 }
2aeb0b88 2928 return 0;
1da177e4
LT
2929}
2930
2931static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
2932{
40d4e3df 2933 for ( ; i; i = i->next) {
1da177e4
LT
2934 if (i->ifindex == dev->ifindex)
2935 packet_dev_mc(dev, i, what);
2936 }
2937}
2938
0fb375fb 2939static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1da177e4
LT
2940{
2941 struct packet_sock *po = pkt_sk(sk);
2942 struct packet_mclist *ml, *i;
2943 struct net_device *dev;
2944 int err;
2945
2946 rtnl_lock();
2947
2948 err = -ENODEV;
3b1e0a65 2949 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
1da177e4
LT
2950 if (!dev)
2951 goto done;
2952
2953 err = -EINVAL;
1162563f 2954 if (mreq->mr_alen > dev->addr_len)
1da177e4
LT
2955 goto done;
2956
2957 err = -ENOBUFS;
8b3a7005 2958 i = kmalloc(sizeof(*i), GFP_KERNEL);
1da177e4
LT
2959 if (i == NULL)
2960 goto done;
2961
2962 err = 0;
2963 for (ml = po->mclist; ml; ml = ml->next) {
2964 if (ml->ifindex == mreq->mr_ifindex &&
2965 ml->type == mreq->mr_type &&
2966 ml->alen == mreq->mr_alen &&
2967 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
2968 ml->count++;
2969 /* Free the new element ... */
2970 kfree(i);
2971 goto done;
2972 }
2973 }
2974
2975 i->type = mreq->mr_type;
2976 i->ifindex = mreq->mr_ifindex;
2977 i->alen = mreq->mr_alen;
2978 memcpy(i->addr, mreq->mr_address, i->alen);
2979 i->count = 1;
2980 i->next = po->mclist;
2981 po->mclist = i;
2aeb0b88
WC
2982 err = packet_dev_mc(dev, i, 1);
2983 if (err) {
2984 po->mclist = i->next;
2985 kfree(i);
2986 }
1da177e4
LT
2987
2988done:
2989 rtnl_unlock();
2990 return err;
2991}
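/*
 * packet_mc_add() above backs the PACKET_ADD_MEMBERSHIP socket option:
 * it looks up the device, records the membership on the socket's
 * mclist and forwards it to the device (multicast/unicast address,
 * allmulti or promiscuous mode).  A minimal userspace sketch of
 * enabling promiscuous mode this way (illustrative only; "fd" is an
 * AF_PACKET socket and "eth0" an example interface):
 *
 *	struct packet_mreq mreq = {0};
 *
 *	mreq.mr_ifindex = if_nametoindex("eth0");
 *	mreq.mr_type    = PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */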
2992
0fb375fb 2993static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1da177e4
LT
2994{
2995 struct packet_mclist *ml, **mlp;
2996
2997 rtnl_lock();
2998
2999 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3000 if (ml->ifindex == mreq->mr_ifindex &&
3001 ml->type == mreq->mr_type &&
3002 ml->alen == mreq->mr_alen &&
3003 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3004 if (--ml->count == 0) {
3005 struct net_device *dev;
3006 *mlp = ml->next;
ad959e76
ED
3007 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3008 if (dev)
1da177e4 3009 packet_dev_mc(dev, ml, -1);
1da177e4
LT
3010 kfree(ml);
3011 }
3012 rtnl_unlock();
3013 return 0;
3014 }
3015 }
3016 rtnl_unlock();
3017 return -EADDRNOTAVAIL;
3018}
3019
3020static void packet_flush_mclist(struct sock *sk)
3021{
3022 struct packet_sock *po = pkt_sk(sk);
3023 struct packet_mclist *ml;
3024
3025 if (!po->mclist)
3026 return;
3027
3028 rtnl_lock();
3029 while ((ml = po->mclist) != NULL) {
3030 struct net_device *dev;
3031
3032 po->mclist = ml->next;
ad959e76
ED
3033 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3034 if (dev != NULL)
1da177e4 3035 packet_dev_mc(dev, ml, -1);
1da177e4
LT
3036 kfree(ml);
3037 }
3038 rtnl_unlock();
3039}
1da177e4
LT
3040
3041static int
b7058842 3042packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1da177e4
LT
3043{
3044 struct sock *sk = sock->sk;
8dc41944 3045 struct packet_sock *po = pkt_sk(sk);
1da177e4
LT
3046 int ret;
3047
3048 if (level != SOL_PACKET)
3049 return -ENOPROTOOPT;
3050
69e3c75f 3051 switch (optname) {
1ce4f28b 3052 case PACKET_ADD_MEMBERSHIP:
1da177e4
LT
3053 case PACKET_DROP_MEMBERSHIP:
3054 {
0fb375fb
EB
3055 struct packet_mreq_max mreq;
3056 int len = optlen;
3057 memset(&mreq, 0, sizeof(mreq));
3058 if (len < sizeof(struct packet_mreq))
1da177e4 3059 return -EINVAL;
0fb375fb
EB
3060 if (len > sizeof(mreq))
3061 len = sizeof(mreq);
40d4e3df 3062 if (copy_from_user(&mreq, optval, len))
1da177e4 3063 return -EFAULT;
0fb375fb
EB
3064 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3065 return -EINVAL;
1da177e4
LT
3066 if (optname == PACKET_ADD_MEMBERSHIP)
3067 ret = packet_mc_add(sk, &mreq);
3068 else
3069 ret = packet_mc_drop(sk, &mreq);
3070 return ret;
3071 }
a2efcfa0 3072
1da177e4 3073 case PACKET_RX_RING:
69e3c75f 3074 case PACKET_TX_RING:
1da177e4 3075 {
f6fb8f10 3076 union tpacket_req_u req_u;
3077 int len;
1da177e4 3078
f6fb8f10 3079 switch (po->tp_version) {
3080 case TPACKET_V1:
3081 case TPACKET_V2:
3082 len = sizeof(req_u.req);
3083 break;
3084 case TPACKET_V3:
3085 default:
3086 len = sizeof(req_u.req3);
3087 break;
3088 }
3089 if (optlen < len)
1da177e4 3090 return -EINVAL;
bfd5f4a3
SS
3091 if (pkt_sk(sk)->has_vnet_hdr)
3092 return -EINVAL;
f6fb8f10 3093 if (copy_from_user(&req_u.req, optval, len))
1da177e4 3094 return -EFAULT;
f6fb8f10 3095 return packet_set_ring(sk, &req_u, 0,
3096 optname == PACKET_TX_RING);
1da177e4
LT
3097 }
3098 case PACKET_COPY_THRESH:
3099 {
3100 int val;
3101
40d4e3df 3102 if (optlen != sizeof(val))
1da177e4 3103 return -EINVAL;
40d4e3df 3104 if (copy_from_user(&val, optval, sizeof(val)))
1da177e4
LT
3105 return -EFAULT;
3106
3107 pkt_sk(sk)->copy_thresh = val;
3108 return 0;
3109 }
bbd6ef87
PM
3110 case PACKET_VERSION:
3111 {
3112 int val;
3113
3114 if (optlen != sizeof(val))
3115 return -EINVAL;
69e3c75f 3116 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
bbd6ef87
PM
3117 return -EBUSY;
3118 if (copy_from_user(&val, optval, sizeof(val)))
3119 return -EFAULT;
3120 switch (val) {
3121 case TPACKET_V1:
3122 case TPACKET_V2:
f6fb8f10 3123 case TPACKET_V3:
bbd6ef87
PM
3124 po->tp_version = val;
3125 return 0;
3126 default:
3127 return -EINVAL;
3128 }
3129 }
8913336a
PM
3130 case PACKET_RESERVE:
3131 {
3132 unsigned int val;
3133
3134 if (optlen != sizeof(val))
3135 return -EINVAL;
69e3c75f 3136 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
8913336a
PM
3137 return -EBUSY;
3138 if (copy_from_user(&val, optval, sizeof(val)))
3139 return -EFAULT;
3140 po->tp_reserve = val;
3141 return 0;
3142 }
69e3c75f
JB
3143 case PACKET_LOSS:
3144 {
3145 unsigned int val;
3146
3147 if (optlen != sizeof(val))
3148 return -EINVAL;
3149 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3150 return -EBUSY;
3151 if (copy_from_user(&val, optval, sizeof(val)))
3152 return -EFAULT;
3153 po->tp_loss = !!val;
3154 return 0;
3155 }
8dc41944
HX
3156 case PACKET_AUXDATA:
3157 {
3158 int val;
3159
3160 if (optlen < sizeof(val))
3161 return -EINVAL;
3162 if (copy_from_user(&val, optval, sizeof(val)))
3163 return -EFAULT;
3164
3165 po->auxdata = !!val;
3166 return 0;
3167 }
80feaacb
PWJ
3168 case PACKET_ORIGDEV:
3169 {
3170 int val;
3171
3172 if (optlen < sizeof(val))
3173 return -EINVAL;
3174 if (copy_from_user(&val, optval, sizeof(val)))
3175 return -EFAULT;
3176
3177 po->origdev = !!val;
3178 return 0;
3179 }
bfd5f4a3
SS
3180 case PACKET_VNET_HDR:
3181 {
3182 int val;
3183
3184 if (sock->type != SOCK_RAW)
3185 return -EINVAL;
3186 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3187 return -EBUSY;
3188 if (optlen < sizeof(val))
3189 return -EINVAL;
3190 if (copy_from_user(&val, optval, sizeof(val)))
3191 return -EFAULT;
3192
3193 po->has_vnet_hdr = !!val;
3194 return 0;
3195 }
614f60fa
SM
3196 case PACKET_TIMESTAMP:
3197 {
3198 int val;
3199
3200 if (optlen != sizeof(val))
3201 return -EINVAL;
3202 if (copy_from_user(&val, optval, sizeof(val)))
3203 return -EFAULT;
3204
3205 po->tp_tstamp = val;
3206 return 0;
3207 }
dc99f600
DM
3208 case PACKET_FANOUT:
3209 {
3210 int val;
3211
3212 if (optlen != sizeof(val))
3213 return -EINVAL;
3214 if (copy_from_user(&val, optval, sizeof(val)))
3215 return -EFAULT;
3216
3217 return fanout_add(sk, val & 0xffff, val >> 16);
3218 }
1da177e4
LT
3219 default:
3220 return -ENOPROTOOPT;
3221 }
3222}
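/*
 * The PACKET_VERSION and PACKET_RX_RING cases above are typically used
 * together: the version must be chosen before the ring is created, and
 * the ring geometry has to satisfy the checks in packet_set_ring()
 * (block size page aligned, frame size TPACKET_ALIGNMENT aligned, and
 * tp_frame_nr == tp_block_nr * frames-per-block).  A minimal userspace
 * sketch for a TPACKET_V2 RX ring (sizes are only an example and
 * assume 4 KiB pages; "fd" is the packet socket):
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 64 * (4096 / 2048),
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */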
3223
3224static int packet_getsockopt(struct socket *sock, int level, int optname,
3225 char __user *optval, int __user *optlen)
3226{
3227 int len;
c06fff6e 3228 int val, lv = sizeof(val);
1da177e4
LT
3229 struct sock *sk = sock->sk;
3230 struct packet_sock *po = pkt_sk(sk);
c06fff6e 3231 void *data = &val;
8dc41944 3232 struct tpacket_stats st;
f6fb8f10 3233 union tpacket_stats_u st_u;
1da177e4
LT
3234
3235 if (level != SOL_PACKET)
3236 return -ENOPROTOOPT;
3237
8ae55f04
KK
3238 if (get_user(len, optlen))
3239 return -EFAULT;
1da177e4
LT
3240
3241 if (len < 0)
3242 return -EINVAL;
1ce4f28b 3243
69e3c75f 3244 switch (optname) {
1da177e4 3245 case PACKET_STATISTICS:
1da177e4 3246 spin_lock_bh(&sk->sk_receive_queue.lock);
f6fb8f10 3247 if (po->tp_version == TPACKET_V3) {
c06fff6e 3248 lv = sizeof(struct tpacket_stats_v3);
f6fb8f10 3249 memcpy(&st_u.stats3, &po->stats,
c06fff6e 3250 sizeof(struct tpacket_stats));
f6fb8f10 3251 st_u.stats3.tp_freeze_q_cnt =
c06fff6e 3252 po->stats_u.stats3.tp_freeze_q_cnt;
f6fb8f10 3253 st_u.stats3.tp_packets += po->stats.tp_drops;
3254 data = &st_u.stats3;
3255 } else {
c06fff6e 3256 lv = sizeof(struct tpacket_stats);
f6fb8f10 3257 st = po->stats;
3258 st.tp_packets += st.tp_drops;
3259 data = &st;
3260 }
1da177e4
LT
3261 memset(&po->stats, 0, sizeof(st));
3262 spin_unlock_bh(&sk->sk_receive_queue.lock);
8dc41944
HX
3263 break;
3264 case PACKET_AUXDATA:
8dc41944 3265 val = po->auxdata;
80feaacb
PWJ
3266 break;
3267 case PACKET_ORIGDEV:
80feaacb 3268 val = po->origdev;
bfd5f4a3
SS
3269 break;
3270 case PACKET_VNET_HDR:
bfd5f4a3 3271 val = po->has_vnet_hdr;
1da177e4 3272 break;
bbd6ef87 3273 case PACKET_VERSION:
bbd6ef87 3274 val = po->tp_version;
bbd6ef87
PM
3275 break;
3276 case PACKET_HDRLEN:
3277 if (len > sizeof(int))
3278 len = sizeof(int);
3279 if (copy_from_user(&val, optval, len))
3280 return -EFAULT;
3281 switch (val) {
3282 case TPACKET_V1:
3283 val = sizeof(struct tpacket_hdr);
3284 break;
3285 case TPACKET_V2:
3286 val = sizeof(struct tpacket2_hdr);
3287 break;
f6fb8f10 3288 case TPACKET_V3:
3289 val = sizeof(struct tpacket3_hdr);
3290 break;
bbd6ef87
PM
3291 default:
3292 return -EINVAL;
3293 }
bbd6ef87 3294 break;
8913336a 3295 case PACKET_RESERVE:
8913336a 3296 val = po->tp_reserve;
8913336a 3297 break;
69e3c75f 3298 case PACKET_LOSS:
69e3c75f 3299 val = po->tp_loss;
69e3c75f 3300 break;
614f60fa 3301 case PACKET_TIMESTAMP:
614f60fa 3302 val = po->tp_tstamp;
614f60fa 3303 break;
dc99f600 3304 case PACKET_FANOUT:
dc99f600
DM
3305 val = (po->fanout ?
3306 ((u32)po->fanout->id |
3307 ((u32)po->fanout->type << 16)) :
3308 0);
dc99f600 3309 break;
1da177e4
LT
3310 default:
3311 return -ENOPROTOOPT;
3312 }
3313
c06fff6e
ED
3314 if (len > lv)
3315 len = lv;
8ae55f04
KK
3316 if (put_user(len, optlen))
3317 return -EFAULT;
8dc41944
HX
3318 if (copy_to_user(optval, data, len))
3319 return -EFAULT;
8ae55f04 3320 return 0;
1da177e4
LT
3321}
3322
3323
3324static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3325{
3326 struct sock *sk;
3327 struct hlist_node *node;
ad930650 3328 struct net_device *dev = data;
c346dca1 3329 struct net *net = dev_net(dev);
1da177e4 3330
808f5114 3331 rcu_read_lock();
3332 sk_for_each_rcu(sk, node, &net->packet.sklist) {
1da177e4
LT
3333 struct packet_sock *po = pkt_sk(sk);
3334
3335 switch (msg) {
3336 case NETDEV_UNREGISTER:
1da177e4
LT
3337 if (po->mclist)
3338 packet_dev_mclist(dev, po->mclist, -1);
a2efcfa0
DM
3339 /* fallthrough */
3340
1da177e4
LT
3341 case NETDEV_DOWN:
3342 if (dev->ifindex == po->ifindex) {
3343 spin_lock(&po->bind_lock);
3344 if (po->running) {
ce06b03e 3345 __unregister_prot_hook(sk, false);
1da177e4
LT
3346 sk->sk_err = ENETDOWN;
3347 if (!sock_flag(sk, SOCK_DEAD))
3348 sk->sk_error_report(sk);
3349 }
3350 if (msg == NETDEV_UNREGISTER) {
3351 po->ifindex = -1;
160ff18a
BG
3352 if (po->prot_hook.dev)
3353 dev_put(po->prot_hook.dev);
1da177e4
LT
3354 po->prot_hook.dev = NULL;
3355 }
3356 spin_unlock(&po->bind_lock);
3357 }
3358 break;
3359 case NETDEV_UP:
808f5114 3360 if (dev->ifindex == po->ifindex) {
3361 spin_lock(&po->bind_lock);
ce06b03e
DM
3362 if (po->num)
3363 register_prot_hook(sk);
808f5114 3364 spin_unlock(&po->bind_lock);
1da177e4 3365 }
1da177e4
LT
3366 break;
3367 }
3368 }
808f5114 3369 rcu_read_unlock();
1da177e4
LT
3370 return NOTIFY_DONE;
3371}
3372
3373
3374static int packet_ioctl(struct socket *sock, unsigned int cmd,
3375 unsigned long arg)
3376{
3377 struct sock *sk = sock->sk;
3378
69e3c75f 3379 switch (cmd) {
40d4e3df
ED
3380 case SIOCOUTQ:
3381 {
3382 int amount = sk_wmem_alloc_get(sk);
31e6d363 3383
40d4e3df
ED
3384 return put_user(amount, (int __user *)arg);
3385 }
3386 case SIOCINQ:
3387 {
3388 struct sk_buff *skb;
3389 int amount = 0;
3390
3391 spin_lock_bh(&sk->sk_receive_queue.lock);
3392 skb = skb_peek(&sk->sk_receive_queue);
3393 if (skb)
3394 amount = skb->len;
3395 spin_unlock_bh(&sk->sk_receive_queue.lock);
3396 return put_user(amount, (int __user *)arg);
3397 }
3398 case SIOCGSTAMP:
3399 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3400 case SIOCGSTAMPNS:
3401 return sock_get_timestampns(sk, (struct timespec __user *)arg);
1ce4f28b 3402
1da177e4 3403#ifdef CONFIG_INET
40d4e3df
ED
3404 case SIOCADDRT:
3405 case SIOCDELRT:
3406 case SIOCDARP:
3407 case SIOCGARP:
3408 case SIOCSARP:
3409 case SIOCGIFADDR:
3410 case SIOCSIFADDR:
3411 case SIOCGIFBRDADDR:
3412 case SIOCSIFBRDADDR:
3413 case SIOCGIFNETMASK:
3414 case SIOCSIFNETMASK:
3415 case SIOCGIFDSTADDR:
3416 case SIOCSIFDSTADDR:
3417 case SIOCSIFFLAGS:
40d4e3df 3418 return inet_dgram_ops.ioctl(sock, cmd, arg);
1da177e4
LT
3419#endif
3420
40d4e3df
ED
3421 default:
3422 return -ENOIOCTLCMD;
1da177e4
LT
3423 }
3424 return 0;
3425}
3426
40d4e3df 3427static unsigned int packet_poll(struct file *file, struct socket *sock,
1da177e4
LT
3428 poll_table *wait)
3429{
3430 struct sock *sk = sock->sk;
3431 struct packet_sock *po = pkt_sk(sk);
3432 unsigned int mask = datagram_poll(file, sock, wait);
3433
3434 spin_lock_bh(&sk->sk_receive_queue.lock);
69e3c75f 3435 if (po->rx_ring.pg_vec) {
f6fb8f10 3436 if (!packet_previous_rx_frame(po, &po->rx_ring,
3437 TP_STATUS_KERNEL))
1da177e4
LT
3438 mask |= POLLIN | POLLRDNORM;
3439 }
3440 spin_unlock_bh(&sk->sk_receive_queue.lock);
69e3c75f
JB
3441 spin_lock_bh(&sk->sk_write_queue.lock);
3442 if (po->tx_ring.pg_vec) {
3443 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3444 mask |= POLLOUT | POLLWRNORM;
3445 }
3446 spin_unlock_bh(&sk->sk_write_queue.lock);
1da177e4
LT
3447 return mask;
3448}
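/*
 * packet_poll() above reports POLLIN as soon as the previous RX ring
 * frame is no longer owned by the kernel, and POLLOUT while the
 * current TX ring frame is available, so a ring user can sleep in
 * poll(2) instead of spinning on tp_status.  A minimal sketch
 * (illustrative only; "fd" is the packet socket):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;	// at least one RX frame is ready for user space
 */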
3449
3450
3451/* Dirty? Well, I still have not found a better way to account
3452 * for user mmaps.
3453 */
3454
3455static void packet_mm_open(struct vm_area_struct *vma)
3456{
3457 struct file *file = vma->vm_file;
40d4e3df 3458 struct socket *sock = file->private_data;
1da177e4 3459 struct sock *sk = sock->sk;
1ce4f28b 3460
1da177e4
LT
3461 if (sk)
3462 atomic_inc(&pkt_sk(sk)->mapped);
3463}
3464
3465static void packet_mm_close(struct vm_area_struct *vma)
3466{
3467 struct file *file = vma->vm_file;
40d4e3df 3468 struct socket *sock = file->private_data;
1da177e4 3469 struct sock *sk = sock->sk;
1ce4f28b 3470
1da177e4
LT
3471 if (sk)
3472 atomic_dec(&pkt_sk(sk)->mapped);
3473}
3474
f0f37e2f 3475static const struct vm_operations_struct packet_mmap_ops = {
40d4e3df
ED
3476 .open = packet_mm_open,
3477 .close = packet_mm_close,
1da177e4
LT
3478};
3479
0e3125c7
NH
3480static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3481 unsigned int len)
1da177e4
LT
3482{
3483 int i;
3484
4ebf0ae2 3485 for (i = 0; i < len; i++) {
0e3125c7 3486 if (likely(pg_vec[i].buffer)) {
c56b4d90 3487 if (is_vmalloc_addr(pg_vec[i].buffer))
0e3125c7
NH
3488 vfree(pg_vec[i].buffer);
3489 else
3490 free_pages((unsigned long)pg_vec[i].buffer,
3491 order);
3492 pg_vec[i].buffer = NULL;
3493 }
1da177e4
LT
3494 }
3495 kfree(pg_vec);
3496}
3497
eea49cc9 3498static char *alloc_one_pg_vec_page(unsigned long order)
4ebf0ae2 3499{
0e3125c7
NH
3500 char *buffer = NULL;
3501 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3502 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3503
3504 buffer = (char *) __get_free_pages(gfp_flags, order);
3505
3506 if (buffer)
3507 return buffer;
3508
3509 /*
3510 * __get_free_pages failed, fall back to vmalloc
3511 */
bbce5a59 3512 buffer = vzalloc((1 << order) * PAGE_SIZE);
719bfeaa 3513
0e3125c7
NH
3514 if (buffer)
3515 return buffer;
3516
3517 /*
3518 * vmalloc failed, let's dig into swap here
3519 */
0e3125c7
NH
3520 gfp_flags &= ~__GFP_NORETRY;
3521 buffer = (char *)__get_free_pages(gfp_flags, order);
3522 if (buffer)
3523 return buffer;
3524
3525 /*
3526 * complete and utter failure
3527 */
3528 return NULL;
4ebf0ae2
DM
3529}
3530
0e3125c7 3531static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4ebf0ae2
DM
3532{
3533 unsigned int block_nr = req->tp_block_nr;
0e3125c7 3534 struct pgv *pg_vec;
4ebf0ae2
DM
3535 int i;
3536
0e3125c7 3537 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4ebf0ae2
DM
3538 if (unlikely(!pg_vec))
3539 goto out;
3540
3541 for (i = 0; i < block_nr; i++) {
c56b4d90 3542 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
0e3125c7 3543 if (unlikely(!pg_vec[i].buffer))
4ebf0ae2
DM
3544 goto out_free_pgvec;
3545 }
3546
3547out:
3548 return pg_vec;
3549
3550out_free_pgvec:
3551 free_pg_vec(pg_vec, order, block_nr);
3552 pg_vec = NULL;
3553 goto out;
3554}
1da177e4 3555
f6fb8f10 3556static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
69e3c75f 3557 int closing, int tx_ring)
1da177e4 3558{
0e3125c7 3559 struct pgv *pg_vec = NULL;
1da177e4 3560 struct packet_sock *po = pkt_sk(sk);
0e11c91e 3561 int was_running, order = 0;
69e3c75f
JB
3562 struct packet_ring_buffer *rb;
3563 struct sk_buff_head *rb_queue;
0e11c91e 3564 __be16 num;
f6fb8f10 3565 int err = -EINVAL;
3566 /* Local alias added to minimize code churn */
3567 struct tpacket_req *req = &req_u->req;
3568
3569 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3570 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3571 WARN(1, "Tx-ring is not supported.\n");
3572 goto out;
3573 }
1ce4f28b 3574
69e3c75f
JB
3575 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3576 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
1da177e4 3577
69e3c75f
JB
3578 err = -EBUSY;
3579 if (!closing) {
3580 if (atomic_read(&po->mapped))
3581 goto out;
3582 if (atomic_read(&rb->pending))
3583 goto out;
3584 }
1da177e4 3585
69e3c75f
JB
3586 if (req->tp_block_nr) {
3587 /* Sanity tests and some calculations */
3588 err = -EBUSY;
3589 if (unlikely(rb->pg_vec))
3590 goto out;
1da177e4 3591
bbd6ef87
PM
3592 switch (po->tp_version) {
3593 case TPACKET_V1:
3594 po->tp_hdrlen = TPACKET_HDRLEN;
3595 break;
3596 case TPACKET_V2:
3597 po->tp_hdrlen = TPACKET2_HDRLEN;
3598 break;
f6fb8f10 3599 case TPACKET_V3:
3600 po->tp_hdrlen = TPACKET3_HDRLEN;
3601 break;
bbd6ef87
PM
3602 }
3603
69e3c75f 3604 err = -EINVAL;
4ebf0ae2 3605 if (unlikely((int)req->tp_block_size <= 0))
69e3c75f 3606 goto out;
4ebf0ae2 3607 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
69e3c75f 3608 goto out;
8913336a 3609 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
69e3c75f
JB
3610 po->tp_reserve))
3611 goto out;
4ebf0ae2 3612 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
69e3c75f 3613 goto out;
1da177e4 3614
69e3c75f
JB
3615 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3616 if (unlikely(rb->frames_per_block <= 0))
3617 goto out;
3618 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3619 req->tp_frame_nr))
3620 goto out;
1da177e4
LT
3621
3622 err = -ENOMEM;
4ebf0ae2
DM
3623 order = get_order(req->tp_block_size);
3624 pg_vec = alloc_pg_vec(req, order);
3625 if (unlikely(!pg_vec))
1da177e4 3626 goto out;
f6fb8f10 3627 switch (po->tp_version) {
3628 case TPACKET_V3:
3629 /* Transmit path is not supported. We checked
3630 * it above but just being paranoid
3631 */
3632 if (!tx_ring)
3633 init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3634 break;
3635 default:
3636 break;
3637 }
69e3c75f
JB
3638 }
3639 /* Done */
3640 else {
3641 err = -EINVAL;
4ebf0ae2 3642 if (unlikely(req->tp_frame_nr))
69e3c75f 3643 goto out;
1da177e4
LT
3644 }
3645
3646 lock_sock(sk);
3647
3648 /* Detach socket from network */
3649 spin_lock(&po->bind_lock);
3650 was_running = po->running;
3651 num = po->num;
3652 if (was_running) {
1da177e4 3653 po->num = 0;
ce06b03e 3654 __unregister_prot_hook(sk, false);
1da177e4
LT
3655 }
3656 spin_unlock(&po->bind_lock);
1ce4f28b 3657
1da177e4
LT
3658 synchronize_net();
3659
3660 err = -EBUSY;
905db440 3661 mutex_lock(&po->pg_vec_lock);
1da177e4
LT
3662 if (closing || atomic_read(&po->mapped) == 0) {
3663 err = 0;
69e3c75f 3664 spin_lock_bh(&rb_queue->lock);
c053fd96 3665 swap(rb->pg_vec, pg_vec);
69e3c75f
JB
3666 rb->frame_max = (req->tp_frame_nr - 1);
3667 rb->head = 0;
3668 rb->frame_size = req->tp_frame_size;
3669 spin_unlock_bh(&rb_queue->lock);
3670
c053fd96
CG
3671 swap(rb->pg_vec_order, order);
3672 swap(rb->pg_vec_len, req->tp_block_nr);
69e3c75f
JB
3673
3674 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3675 po->prot_hook.func = (po->rx_ring.pg_vec) ?
3676 tpacket_rcv : packet_rcv;
3677 skb_queue_purge(rb_queue);
1da177e4 3678 if (atomic_read(&po->mapped))
40d4e3df
ED
3679 pr_err("packet_mmap: vma is busy: %d\n",
3680 atomic_read(&po->mapped));
1da177e4 3681 }
905db440 3682 mutex_unlock(&po->pg_vec_lock);
1da177e4
LT
3683
3684 spin_lock(&po->bind_lock);
ce06b03e 3685 if (was_running) {
1da177e4 3686 po->num = num;
ce06b03e 3687 register_prot_hook(sk);
1da177e4
LT
3688 }
3689 spin_unlock(&po->bind_lock);
f6fb8f10 3690 if (closing && (po->tp_version > TPACKET_V2)) {
3691 /* Because we don't support block-based V3 on tx-ring */
3692 if (!tx_ring)
3693 prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3694 }
1da177e4
LT
3695 release_sock(sk);
3696
1da177e4
LT
3697 if (pg_vec)
3698 free_pg_vec(pg_vec, order, req->tp_block_nr);
3699out:
3700 return err;
3701}
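/*
 * packet_set_ring() above does the heavy lifting for both ring
 * directions: it validates the requested geometry, allocates the page
 * vector, temporarily unregisters the protocol hook, swaps the old and
 * new ring under pg_vec_lock, and then re-registers the hook if the
 * socket was running, switching between packet_rcv() and tpacket_rcv()
 * depending on whether an RX ring now exists.  Passing a request with
 * tp_block_nr == 0 (as packet_release() does) tears an existing ring
 * down through the same path.
 */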
3702
69e3c75f
JB
3703static int packet_mmap(struct file *file, struct socket *sock,
3704 struct vm_area_struct *vma)
1da177e4
LT
3705{
3706 struct sock *sk = sock->sk;
3707 struct packet_sock *po = pkt_sk(sk);
69e3c75f
JB
3708 unsigned long size, expected_size;
3709 struct packet_ring_buffer *rb;
1da177e4
LT
3710 unsigned long start;
3711 int err = -EINVAL;
3712 int i;
3713
3714 if (vma->vm_pgoff)
3715 return -EINVAL;
3716
905db440 3717 mutex_lock(&po->pg_vec_lock);
69e3c75f
JB
3718
3719 expected_size = 0;
3720 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3721 if (rb->pg_vec) {
3722 expected_size += rb->pg_vec_len
3723 * rb->pg_vec_pages
3724 * PAGE_SIZE;
3725 }
3726 }
3727
3728 if (expected_size == 0)
1da177e4 3729 goto out;
69e3c75f
JB
3730
3731 size = vma->vm_end - vma->vm_start;
3732 if (size != expected_size)
1da177e4
LT
3733 goto out;
3734
1da177e4 3735 start = vma->vm_start;
69e3c75f
JB
3736 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3737 if (rb->pg_vec == NULL)
3738 continue;
3739
3740 for (i = 0; i < rb->pg_vec_len; i++) {
0e3125c7
NH
3741 struct page *page;
3742 void *kaddr = rb->pg_vec[i].buffer;
69e3c75f
JB
3743 int pg_num;
3744
c56b4d90
CG
3745 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3746 page = pgv_to_page(kaddr);
69e3c75f
JB
3747 err = vm_insert_page(vma, start, page);
3748 if (unlikely(err))
3749 goto out;
3750 start += PAGE_SIZE;
0e3125c7 3751 kaddr += PAGE_SIZE;
69e3c75f 3752 }
4ebf0ae2 3753 }
1da177e4 3754 }
69e3c75f 3755
4ebf0ae2 3756 atomic_inc(&po->mapped);
1da177e4
LT
3757 vma->vm_ops = &packet_mmap_ops;
3758 err = 0;
3759
3760out:
905db440 3761 mutex_unlock(&po->pg_vec_lock);
1da177e4
LT
3762 return err;
3763}
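/*
 * packet_mmap() above only accepts a single mapping that covers every
 * configured ring, so user space has to map the RX and TX rings (in
 * that order) with one call whose length is the sum of the configured
 * ring sizes.  A minimal sketch, assuming the tpacket_req structures
 * "rx_req" and "tx_req" that were used to create both rings and the
 * packet socket "fd":
 *
 *	size_t rx_size = (size_t)rx_req.tp_block_size * rx_req.tp_block_nr;
 *	size_t tx_size = (size_t)tx_req.tp_block_size * tx_req.tp_block_nr;
 *	void *ring = mmap(NULL, rx_size + tx_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	// RX frames start at "ring", TX frames at "ring + rx_size"
 */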
1da177e4 3764
90ddc4f0 3765static const struct proto_ops packet_ops_spkt = {
1da177e4
LT
3766 .family = PF_PACKET,
3767 .owner = THIS_MODULE,
3768 .release = packet_release,
3769 .bind = packet_bind_spkt,
3770 .connect = sock_no_connect,
3771 .socketpair = sock_no_socketpair,
3772 .accept = sock_no_accept,
3773 .getname = packet_getname_spkt,
3774 .poll = datagram_poll,
3775 .ioctl = packet_ioctl,
3776 .listen = sock_no_listen,
3777 .shutdown = sock_no_shutdown,
3778 .setsockopt = sock_no_setsockopt,
3779 .getsockopt = sock_no_getsockopt,
3780 .sendmsg = packet_sendmsg_spkt,
3781 .recvmsg = packet_recvmsg,
3782 .mmap = sock_no_mmap,
3783 .sendpage = sock_no_sendpage,
3784};
1da177e4 3785
90ddc4f0 3786static const struct proto_ops packet_ops = {
1da177e4
LT
3787 .family = PF_PACKET,
3788 .owner = THIS_MODULE,
3789 .release = packet_release,
3790 .bind = packet_bind,
3791 .connect = sock_no_connect,
3792 .socketpair = sock_no_socketpair,
3793 .accept = sock_no_accept,
1ce4f28b 3794 .getname = packet_getname,
1da177e4
LT
3795 .poll = packet_poll,
3796 .ioctl = packet_ioctl,
3797 .listen = sock_no_listen,
3798 .shutdown = sock_no_shutdown,
3799 .setsockopt = packet_setsockopt,
3800 .getsockopt = packet_getsockopt,
3801 .sendmsg = packet_sendmsg,
3802 .recvmsg = packet_recvmsg,
3803 .mmap = packet_mmap,
3804 .sendpage = sock_no_sendpage,
3805};
3806
ec1b4cf7 3807static const struct net_proto_family packet_family_ops = {
1da177e4
LT
3808 .family = PF_PACKET,
3809 .create = packet_create,
3810 .owner = THIS_MODULE,
3811};
3812
3813static struct notifier_block packet_netdev_notifier = {
40d4e3df 3814 .notifier_call = packet_notifier,
1da177e4
LT
3815};
3816
3817#ifdef CONFIG_PROC_FS
1da177e4
LT
3818
3819static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
808f5114 3820 __acquires(RCU)
1da177e4 3821{
e372c414 3822 struct net *net = seq_file_net(seq);
808f5114 3823
3824 rcu_read_lock();
3825 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
1da177e4
LT
3826}
3827
3828static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3829{
1bf40954 3830 struct net *net = seq_file_net(seq);
808f5114 3831 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
1da177e4
LT
3832}
3833
3834static void packet_seq_stop(struct seq_file *seq, void *v)
808f5114 3835 __releases(RCU)
1da177e4 3836{
808f5114 3837 rcu_read_unlock();
1da177e4
LT
3838}
3839
1ce4f28b 3840static int packet_seq_show(struct seq_file *seq, void *v)
1da177e4
LT
3841{
3842 if (v == SEQ_START_TOKEN)
3843 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3844 else {
b7ceabd9 3845 struct sock *s = sk_entry(v);
1da177e4
LT
3846 const struct packet_sock *po = pkt_sk(s);
3847
3848 seq_printf(seq,
71338aa7 3849 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
1da177e4
LT
3850 s,
3851 atomic_read(&s->sk_refcnt),
3852 s->sk_type,
3853 ntohs(po->num),
3854 po->ifindex,
3855 po->running,
3856 atomic_read(&s->sk_rmem_alloc),
3857 sock_i_uid(s),
40d4e3df 3858 sock_i_ino(s));
1da177e4
LT
3859 }
3860
3861 return 0;
3862}
3863
56b3d975 3864static const struct seq_operations packet_seq_ops = {
1da177e4
LT
3865 .start = packet_seq_start,
3866 .next = packet_seq_next,
3867 .stop = packet_seq_stop,
3868 .show = packet_seq_show,
3869};
3870
3871static int packet_seq_open(struct inode *inode, struct file *file)
3872{
e372c414
DL
3873 return seq_open_net(inode, file, &packet_seq_ops,
3874 sizeof(struct seq_net_private));
1da177e4
LT
3875}
3876
da7071d7 3877static const struct file_operations packet_seq_fops = {
1da177e4
LT
3878 .owner = THIS_MODULE,
3879 .open = packet_seq_open,
3880 .read = seq_read,
3881 .llseek = seq_lseek,
e372c414 3882 .release = seq_release_net,
1da177e4
LT
3883};
3884
3885#endif
3886
2c8c1e72 3887static int __net_init packet_net_init(struct net *net)
d12d01d6 3888{
808f5114 3889 spin_lock_init(&net->packet.sklist_lock);
2aaef4e4 3890 INIT_HLIST_HEAD(&net->packet.sklist);
d12d01d6
DL
3891
3892 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3893 return -ENOMEM;
3894
3895 return 0;
3896}
3897
2c8c1e72 3898static void __net_exit packet_net_exit(struct net *net)
d12d01d6
DL
3899{
3900 proc_net_remove(net, "packet");
3901}
3902
3903static struct pernet_operations packet_net_ops = {
3904 .init = packet_net_init,
3905 .exit = packet_net_exit,
3906};
3907
3908
1da177e4
LT
3909static void __exit packet_exit(void)
3910{
1da177e4 3911 unregister_netdevice_notifier(&packet_netdev_notifier);
d12d01d6 3912 unregister_pernet_subsys(&packet_net_ops);
1da177e4
LT
3913 sock_unregister(PF_PACKET);
3914 proto_unregister(&packet_proto);
3915}
3916
3917static int __init packet_init(void)
3918{
3919 int rc = proto_register(&packet_proto, 0);
3920
3921 if (rc != 0)
3922 goto out;
3923
3924 sock_register(&packet_family_ops);
d12d01d6 3925 register_pernet_subsys(&packet_net_ops);
1da177e4 3926 register_netdevice_notifier(&packet_netdev_notifier);
1da177e4
LT
3927out:
3928 return rc;
3929}
3930
3931module_init(packet_init);
3932module_exit(packet_exit);
3933MODULE_LICENSE("GPL");
3934MODULE_ALIAS_NETPROTO(PF_PACKET);