/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	   Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>

#include "internal.h"

/*
   Assumptions:
   - If a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels should still reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnel); others are silly
     (PPP).
   - A packet socket receives packets with the ll header already pulled,
     so SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
                 header.  PPP makes it so, which is wrong, because it
                 introduces asymmetry between the rx and tx paths.
   data       -> data

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Resume
  If dev->hard_header == NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by the device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */

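/* To make the header positioning rules above concrete, here is a minimal,
 * illustrative user-space sketch (not part of this file; it assumes the
 * usual <sys/socket.h>, <linux/if_packet.h> and <linux/if_ether.h> headers):
 *
 *	int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	char buf[2048];
 *
 *	// SOCK_RAW: the read starts at the ll header, e.g. a struct
 *	// ethhdr on Ethernet devices.
 *	read(raw, buf, sizeof(buf));
 *
 *	// SOCK_DGRAM: the ll header stays pulled; buf starts at the
 *	// network-layer payload and the link-level address arrives
 *	// out-of-band in a struct sockaddr_ll.
 *	struct sockaddr_ll sll;
 *	socklen_t slen = sizeof(sll);
 *	recvfrom(dgrm, buf, sizeof(buf), 0, (struct sockaddr *)&sll, &slen);
 */
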
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

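/* A worked example of the TPACKET_V3 block layout described by the macros
 * above (the numbers assume a typical build where
 * sizeof(struct tpacket_block_desc) == 48; they are illustrative, not
 * normative). With a user-requested tp_sizeof_priv of 13:
 *
 *	BLK_HDR_LEN       == ALIGN(48, 8)      == 48
 *	BLK_PLUS_PRIV(13) == 48 + ALIGN(13, 8) == 48 + 16 == 64
 *
 * so BLOCK_PRIV() points right after the block descriptor and the first
 * packet of each block is stored 64 bytes into the block.
 */
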
struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	features = netif_skb_features(skb);
	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto drop;

	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

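/* packet_direct_xmit() above is only used when user space opts in to
 * bypassing the qdisc layer via the PACKET_QDISC_BYPASS socket option,
 * which is what packet_use_direct_xmit() tests for. A minimal,
 * illustrative user-space sketch (not part of this file):
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 *
 * After this, frames sent on the socket go straight to the driver queue;
 * skipping the qdisc also means no queueing or traffic shaping on this path.
 */
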
static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL,
						    __packet_pick_tx_queue);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = __packet_pick_tx_queue(dev, skb);
	}

	skb_set_queue_mapping(skb, queue_index);
}

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

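/* A worked example of the position -> address mapping in
 * packet_lookup_frame() above (numbers are illustrative): with
 * rb->frames_per_block == 4 and rb->frame_size == 2048, position 10 gives
 * pg_vec_pos == 10 / 4 == 2 and frame_offset == 10 % 4 == 2, so the frame
 * lives at rb->pg_vec[2].buffer + 2 * 2048.
 */
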
static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo + 1;
	return tmo;
}

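/* A worked example of the timeout computed by prb_calc_retire_blk_tmo()
 * above (illustrative): for a 1 MiB block on a 1 Gb/s link, msec == 1 and
 * div == 1000 / 1000 == 1, mbits == (1 MiB * 8) / (1024 * 1024) == 8, so
 * tmo == 8 and the function returns 9 ms - just over the ~8 ms the link
 * needs to fill the block. On a 10 Gb/s link, div == 10, the integer
 * division collapses mbits to 0, and the function returns tmo + 1 == 1 ms.
 */
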
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start = pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks = req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *         lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. The queue was frozen, user-space
				 * caught up, and now the link went idle &&
				 * the timer fired. We don't have a block to
				 * close, so we open this block and restart
				 * the timer. Opening a block also thaws the
				 * queue and refreshes the timer; both are
				 * side effects.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DO NOT refresh the timer on purpose,
 *	 because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec ts;

		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
			       struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. User-space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;

	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
				     struct packet_ring_buffer *rb,
				     unsigned int idx,
				     int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;

	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num - 1;
	else
		prev = rb->prb_bdqc.knum_blocks - 1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);

	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;

	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head + 1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.frame_max + 1;
	idx = po->rx_ring.head;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

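/* An illustrative reading of the probe in __tpacket_has_room() above: with
 * 64 frames in the rx ring (frame_max == 63, so len == 64) and
 * pow_off == ROOM_POW_OFF == 2, we test the frame 64 >> 2 == 16 slots ahead
 * of head; if it is still TP_STATUS_KERNEL, at least a quarter of the ring
 * is free. With pow_off == 0 we test the frame at head itself, i.e. whether
 * there is any room at all.
 */
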
static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.prb_bdqc.knum_blocks;
	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
					  - (skb ? skb->truesize : 0);
		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;
	bool has_room;

	spin_lock_bh(&po->sk.sk_receive_queue.lock);
	ret = __packet_rcv_has_room(po, skb);
	has_room = ret == ROOM_NORMAL;
	if (po->pressure == has_room)
		po->pressure = !has_room;
	spin_unlock_bh(&po->sk.sk_receive_queue.lock);

	return ret;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (po->rollover->history[i] == rxhash)
			count++;

	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(skb_get_hash(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !po_next->pressure &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

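/* A minimal, illustrative user-space sketch of joining a fanout group (not
 * part of this file; the group id 42 is arbitrary). Every socket that
 * issues this setsockopt with the same id joins the same group, and
 * packet_rcv_fanout() above then steers each packet to one member
 * according to the chosen mode:
 *
 *	int fanout_arg = 42 | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		   &fanout_arg, sizeof(fanout_arg));
 */
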
1489 DEFINE_MUTEX(fanout_mutex);
1490 EXPORT_SYMBOL_GPL(fanout_mutex);
1491 static LIST_HEAD(fanout_list);
1492
1493 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1494 {
1495         struct packet_fanout *f = po->fanout;
1496
1497         spin_lock(&f->lock);
1498         f->arr[f->num_members] = sk;
1499         smp_wmb();
1500         f->num_members++;
1501         spin_unlock(&f->lock);
1502 }
1503
1504 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1505 {
1506         struct packet_fanout *f = po->fanout;
1507         int i;
1508
1509         spin_lock(&f->lock);
1510         for (i = 0; i < f->num_members; i++) {
1511                 if (f->arr[i] == sk)
1512                         break;
1513         }
1514         BUG_ON(i >= f->num_members);
1515         f->arr[i] = f->arr[f->num_members - 1];
1516         f->num_members--;
1517         spin_unlock(&f->lock);
1518 }
1519
1520 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1521 {
1522         if (sk->sk_family != PF_PACKET)
1523                 return false;
1524
1525         return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1526 }
1527
1528 static void fanout_init_data(struct packet_fanout *f)
1529 {
1530         switch (f->type) {
1531         case PACKET_FANOUT_LB:
1532                 atomic_set(&f->rr_cur, 0);
1533                 break;
1534         case PACKET_FANOUT_CBPF:
1535         case PACKET_FANOUT_EBPF:
1536                 RCU_INIT_POINTER(f->bpf_prog, NULL);
1537                 break;
1538         }
1539 }

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
        struct bpf_prog *old;

        spin_lock(&f->lock);
        old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
        rcu_assign_pointer(f->bpf_prog, new);
        spin_unlock(&f->lock);

        if (old) {
                synchronize_net();
                bpf_prog_destroy(old);
        }
}

static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
                                unsigned int len)
{
        struct bpf_prog *new;
        struct sock_fprog fprog;
        int ret;

        if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
                return -EPERM;
        if (len != sizeof(fprog))
                return -EINVAL;
        if (copy_from_user(&fprog, data, len))
                return -EFAULT;

        ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
        if (ret)
                return ret;

        __fanout_set_data_bpf(po->fanout, new);
        return 0;
}
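
/*
 * Usage sketch (illustrative, from userspace, not part of this file): a
 * PACKET_FANOUT_CBPF group takes its classic BPF program as a struct
 * sock_fprog through the PACKET_FANOUT_DATA socket option.  Assuming an
 * instruction array insns[] built with BPF_STMT()/BPF_JUMP():
 *
 *      struct sock_fprog fprog = {
 *              .len    = sizeof(insns) / sizeof(insns[0]),
 *              .filter = insns,
 *      };
 *      setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &fprog, sizeof(fprog));
 *
 * The program's return value, reduced modulo the member count, picks the
 * receiving socket in fanout_demux_bpf().
 */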

static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
                                unsigned int len)
{
        struct bpf_prog *new;
        u32 fd;

        if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
                return -EPERM;
        if (len != sizeof(fd))
                return -EINVAL;
        if (copy_from_user(&fd, data, len))
                return -EFAULT;

        new = bpf_prog_get(fd);
        if (IS_ERR(new))
                return PTR_ERR(new);
        if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
                bpf_prog_put(new);
                return -EINVAL;
        }

        __fanout_set_data_bpf(po->fanout, new);
        return 0;
}
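
/*
 * Usage sketch (illustrative): for PACKET_FANOUT_EBPF the same socket
 * option carries the file descriptor of an already-loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program rather than a filter array:
 *
 *      int prog_fd = bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); (attr setup omitted)
 *      setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &prog_fd, sizeof(prog_fd));
 */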

static int fanout_set_data(struct packet_sock *po, char __user *data,
                           unsigned int len)
{
        switch (po->fanout->type) {
        case PACKET_FANOUT_CBPF:
                return fanout_set_data_cbpf(po, data, len);
        case PACKET_FANOUT_EBPF:
                return fanout_set_data_ebpf(po, data, len);
        default:
                return -EINVAL;
        }
}

static void fanout_release_data(struct packet_fanout *f)
{
        switch (f->type) {
        case PACKET_FANOUT_CBPF:
        case PACKET_FANOUT_EBPF:
                __fanout_set_data_bpf(f, NULL);
        }
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
        struct packet_sock *po = pkt_sk(sk);
        struct packet_fanout *f, *match;
        u8 type = type_flags & 0xff;
        u8 flags = type_flags >> 8;
        int err;

        switch (type) {
        case PACKET_FANOUT_ROLLOVER:
                if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
                        return -EINVAL;
                /* fall through */
        case PACKET_FANOUT_HASH:
        case PACKET_FANOUT_LB:
        case PACKET_FANOUT_CPU:
        case PACKET_FANOUT_RND:
        case PACKET_FANOUT_QM:
        case PACKET_FANOUT_CBPF:
        case PACKET_FANOUT_EBPF:
                break;
        default:
                return -EINVAL;
        }

        if (!po->running)
                return -EINVAL;

        if (po->fanout)
                return -EALREADY;

        if (type == PACKET_FANOUT_ROLLOVER ||
            (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
                po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
                if (!po->rollover)
                        return -ENOMEM;
                atomic_long_set(&po->rollover->num, 0);
                atomic_long_set(&po->rollover->num_huge, 0);
                atomic_long_set(&po->rollover->num_failed, 0);
        }

        mutex_lock(&fanout_mutex);
        match = NULL;
        list_for_each_entry(f, &fanout_list, list) {
                if (f->id == id &&
                    read_pnet(&f->net) == sock_net(sk)) {
                        match = f;
                        break;
                }
        }
        err = -EINVAL;
        if (match && match->flags != flags)
                goto out;
        if (!match) {
                err = -ENOMEM;
                match = kzalloc(sizeof(*match), GFP_KERNEL);
                if (!match)
                        goto out;
                write_pnet(&match->net, sock_net(sk));
                match->id = id;
                match->type = type;
                match->flags = flags;
                INIT_LIST_HEAD(&match->list);
                spin_lock_init(&match->lock);
                atomic_set(&match->sk_ref, 0);
                fanout_init_data(match);
                match->prot_hook.type = po->prot_hook.type;
                match->prot_hook.dev = po->prot_hook.dev;
                match->prot_hook.func = packet_rcv_fanout;
                match->prot_hook.af_packet_priv = match;
                match->prot_hook.id_match = match_fanout_group;
                dev_add_pack(&match->prot_hook);
                list_add(&match->list, &fanout_list);
        }
        err = -EINVAL;
        if (match->type == type &&
            match->prot_hook.type == po->prot_hook.type &&
            match->prot_hook.dev == po->prot_hook.dev) {
                err = -ENOSPC;
                if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
                        __dev_remove_pack(&po->prot_hook);
                        po->fanout = match;
                        atomic_inc(&match->sk_ref);
                        __fanout_link(sk, po);
                        err = 0;
                }
        }
out:
        mutex_unlock(&fanout_mutex);
        if (err) {
                kfree(po->rollover);
                po->rollover = NULL;
        }
        return err;
}
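
/*
 * Usage sketch (illustrative): joining a fanout group from userspace.
 * The group id lives in the low 16 bits of the option value and the mode
 * plus flags in the high 16 bits, matching the type_flags decoding above:
 *
 *      int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *      ... bind fd to a device/protocol first so that po->running is set ...
 *      int arg = 42 | (PACKET_FANOUT_HASH << 16);      (42: arbitrary group id)
 *      setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *
 * Every socket joining with the same {net, id} pair becomes one of the
 * up-to-PACKET_FANOUT_MAX members that packet_rcv_fanout() demuxes to.
 */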

static void fanout_release(struct sock *sk)
{
        struct packet_sock *po = pkt_sk(sk);
        struct packet_fanout *f;

        f = po->fanout;
        if (!f)
                return;

        mutex_lock(&fanout_mutex);
        po->fanout = NULL;

        if (atomic_dec_and_test(&f->sk_ref)) {
                list_del(&f->list);
                dev_remove_pack(&f->prot_hook);
                fanout_release_data(f);
                kfree(f);
        }
        mutex_unlock(&fanout_mutex);

        if (po->rollover)
                kfree_rcu(po->rollover, rcu);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt, struct net_device *orig_dev)
{
        struct sock *sk;
        struct sockaddr_pkt *spkt;

        /*
         *      When we registered the protocol we saved the socket in the data
         *      field for just this event.
         */

        sk = pt->af_packet_priv;

        /*
         *      Yank back the headers [hope the device set this
         *      right or kaboom...]
         *
         *      Incoming packets have their ll header pulled,
         *      push it back.
         *
         *      For outgoing ones skb->data == skb_mac_header(skb)
         *      so that this procedure is a no-op.
         */

        if (skb->pkt_type == PACKET_LOOPBACK)
                goto out;

        if (!net_eq(dev_net(dev), sock_net(sk)))
                goto out;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (skb == NULL)
                goto oom;

        /* drop any routing info */
        skb_dst_drop(skb);

        /* drop conntrack reference */
        nf_reset(skb);

        spkt = &PACKET_SKB_CB(skb)->sa.pkt;

        skb_push(skb, skb->data - skb_mac_header(skb));

        /*
         *      The SOCK_PACKET socket receives _all_ frames.
         */

        spkt->spkt_family = dev->type;
        strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
        spkt->spkt_protocol = skb->protocol;

        /*
         *      Charge the memory to the socket. This is done specifically
         *      to prevent sockets from using up all the memory.
         */

        if (sock_queue_rcv_skb(sk, skb) == 0)
                return 0;

out:
        kfree_skb(skb);
oom:
        return 0;
}


/*
 *      Output a raw packet to a device layer. This bypasses all the other
 *      protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
                               size_t len)
{
        struct sock *sk = sock->sk;
        DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
        struct sk_buff *skb = NULL;
        struct net_device *dev;
        __be16 proto = 0;
        int err;
        int extra_len = 0;

        /*
         *      Get and verify the address.
         */

        if (saddr) {
                if (msg->msg_namelen < sizeof(struct sockaddr))
                        return -EINVAL;
                if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
                        proto = saddr->spkt_protocol;
        } else
                return -ENOTCONN;       /* SOCK_PACKET must be sent giving an address */

        /*
         *      Find the device first, so we can size-check against it.
         */

        saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
        rcu_read_lock();
        dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
        err = -ENODEV;
        if (dev == NULL)
                goto out_unlock;

        err = -ENETDOWN;
        if (!(dev->flags & IFF_UP))
                goto out_unlock;

        /*
         * You may not queue a frame bigger than the MTU. This is the lowest level
         * raw protocol and you must do your own fragmentation at this level.
         */

        if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
                if (!netif_supports_nofcs(dev)) {
                        err = -EPROTONOSUPPORT;
                        goto out_unlock;
                }
                extra_len = 4; /* We're doing our own CRC */
        }

        err = -EMSGSIZE;
        if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
                goto out_unlock;

        if (!skb) {
                size_t reserved = LL_RESERVED_SPACE(dev);
                int tlen = dev->needed_tailroom;
                unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

                rcu_read_unlock();
                skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
                if (skb == NULL)
                        return -ENOBUFS;
                /* FIXME: Save some space for broken drivers that write a hard
                 * header at transmission time by themselves. PPP is the notable
                 * one here. This should really be fixed at the driver level.
                 */
                skb_reserve(skb, reserved);
                skb_reset_network_header(skb);

                /* Try to align data part correctly */
                if (hhlen) {
                        skb->data -= hhlen;
                        skb->tail -= hhlen;
                        if (len < hhlen)
                                skb_reset_network_header(skb);
                }
                err = memcpy_from_msg(skb_put(skb, len), msg, len);
                if (err)
                        goto out_free;
                goto retry;
        }

        if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
                /* Earlier code assumed this would be a VLAN pkt,
                 * double-check this now that we have the actual
                 * packet in hand.
                 */
                struct ethhdr *ehdr;
                skb_reset_mac_header(skb);
                ehdr = eth_hdr(skb);
                if (ehdr->h_proto != htons(ETH_P_8021Q)) {
                        err = -EMSGSIZE;
                        goto out_unlock;
                }
        }

        skb->protocol = proto;
        skb->dev = dev;
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

        if (unlikely(extra_len == 4))
                skb->no_fcs = 1;

        skb_probe_transport_header(skb, 0);

        dev_queue_xmit(skb);
        rcu_read_unlock();
        return len;

out_unlock:
        rcu_read_unlock();
out_free:
        kfree_skb(skb);
        return err;
}
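
/*
 * Usage sketch (illustrative): transmitting through the legacy SOCK_PACKET
 * interface handled above.  The destination is named by device rather than
 * ifindex, and the buffer must already be a complete layer-2 frame:
 *
 *      struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *      strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *      spkt.spkt_protocol = htons(ETH_P_IP);
 *      sendto(fd, frame, frame_len, 0,
 *             (struct sockaddr *)&spkt, sizeof(spkt));
 */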

static unsigned int run_filter(struct sk_buff *skb,
                               const struct sock *sk,
                               unsigned int res)
{
        struct sk_filter *filter;

        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (filter != NULL)
                res = bpf_prog_run_clear_cb(filter->prog, skb);
        rcu_read_unlock();

        return res;
}
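
/*
 * Usage sketch (illustrative): the filter consulted by run_filter() is the
 * ordinary socket filter attached with SO_ATTACH_FILTER; its return value
 * caps how many bytes of the packet are kept, and 0 drops it:
 *
 *      struct sock_filter insns[] = {
 *              BPF_STMT(BPF_RET | BPF_K, 0xffff),      (accept whole packet)
 *      };
 *      struct sock_fprog fprog = { .len = 1, .filter = insns };
 *      setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */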

/*
 * This function performs lazy skb cloning, in the hope that most packets
 * are discarded by BPF.
 *
 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return the skb to its original state on
 * exit, we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
                      struct packet_type *pt, struct net_device *orig_dev)
{
        struct sock *sk;
        struct sockaddr_ll *sll;
        struct packet_sock *po;
        u8 *skb_head = skb->data;
        int skb_len = skb->len;
        unsigned int snaplen, res;

        if (skb->pkt_type == PACKET_LOOPBACK)
                goto drop;

        sk = pt->af_packet_priv;
        po = pkt_sk(sk);

        if (!net_eq(dev_net(dev), sock_net(sk)))
                goto drop;

        skb->dev = dev;

        if (dev->header_ops) {
                /* The device has an explicit notion of ll header,
                 * exported to higher levels.
                 *
                 * Otherwise, the device hides details of its frame
                 * structure, so that the corresponding packet head is
                 * never delivered to the user.
                 */
                if (sk->sk_type != SOCK_DGRAM)
                        skb_push(skb, skb->data - skb_mac_header(skb));
                else if (skb->pkt_type == PACKET_OUTGOING) {
                        /* Special case: outgoing packets have ll header at head */
                        skb_pull(skb, skb_network_offset(skb));
                }
        }

        snaplen = skb->len;

        res = run_filter(skb, sk, snaplen);
        if (!res)
                goto drop_n_restore;
        if (snaplen > res)
                snaplen = res;

        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto drop_n_acct;

        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
                if (nskb == NULL)
                        goto drop_n_acct;

                if (skb_head != skb->data) {
                        skb->data = skb_head;
                        skb->len = skb_len;
                }
                consume_skb(skb);
                skb = nskb;
        }

        sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);

        sll = &PACKET_SKB_CB(skb)->sa.ll;
        sll->sll_hatype = dev->type;
        sll->sll_pkttype = skb->pkt_type;
        if (unlikely(po->origdev))
                sll->sll_ifindex = orig_dev->ifindex;
        else
                sll->sll_ifindex = dev->ifindex;

        sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

        /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
         * Use their space for storing the original skb length.
         */
        PACKET_SKB_CB(skb)->sa.origlen = skb->len;

        if (pskb_trim(skb, snaplen))
                goto drop_n_acct;

        skb_set_owner_r(skb, sk);
        skb->dev = NULL;
        skb_dst_drop(skb);

        /* drop conntrack reference */
        nf_reset(skb);

        spin_lock(&sk->sk_receive_queue.lock);
        po->stats.stats1.tp_packets++;
        sock_skb_set_dropcount(sk, skb);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        spin_unlock(&sk->sk_receive_queue.lock);
        sk->sk_data_ready(sk);
        return 0;

drop_n_acct:
        spin_lock(&sk->sk_receive_queue.lock);
        po->stats.stats1.tp_drops++;
        atomic_inc(&sk->sk_drops);
        spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
        if (skb_head != skb->data && skb_shared(skb)) {
                skb->data = skb_head;
                skb->len = skb_len;
        }
drop:
        consume_skb(skb);
        return 0;
}
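
/*
 * Usage sketch (illustrative): draining the queue that packet_rcv() fills,
 * with the link-level metadata prepared above coming back via msg_name:
 *
 *      struct sockaddr_ll sll;
 *      socklen_t sll_len = sizeof(sll);
 *      ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *                           (struct sockaddr *)&sll, &sll_len);
 *      then sll.sll_ifindex, sll.sll_pkttype and sll.sll_addr describe the frame
 */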

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        struct sock *sk;
        struct packet_sock *po;
        struct sockaddr_ll *sll;
        union tpacket_uhdr h;
        u8 *skb_head = skb->data;
        int skb_len = skb->len;
        unsigned int snaplen, res;
        unsigned long status = TP_STATUS_USER;
        unsigned short macoff, netoff, hdrlen;
        struct sk_buff *copy_skb = NULL;
        struct timespec ts;
        __u32 ts_status;

        /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
         * We can add members to them up to the current aligned size without
         * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
         */
        BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
        BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);

        if (skb->pkt_type == PACKET_LOOPBACK)
                goto drop;

        sk = pt->af_packet_priv;
        po = pkt_sk(sk);

        if (!net_eq(dev_net(dev), sock_net(sk)))
                goto drop;

        if (dev->header_ops) {
                if (sk->sk_type != SOCK_DGRAM)
                        skb_push(skb, skb->data - skb_mac_header(skb));
                else if (skb->pkt_type == PACKET_OUTGOING) {
                        /* Special case: outgoing packets have ll header at head */
                        skb_pull(skb, skb_network_offset(skb));
                }
        }

        snaplen = skb->len;

        res = run_filter(skb, sk, snaplen);
        if (!res)
                goto drop_n_restore;

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                status |= TP_STATUS_CSUMNOTREADY;
        else if (skb->pkt_type != PACKET_OUTGOING &&
                 (skb->ip_summed == CHECKSUM_COMPLETE ||
                  skb_csum_unnecessary(skb)))
                status |= TP_STATUS_CSUM_VALID;

        if (snaplen > res)
                snaplen = res;

        if (sk->sk_type == SOCK_DGRAM) {
                macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
                                  po->tp_reserve;
        } else {
                unsigned int maclen = skb_network_offset(skb);
                netoff = TPACKET_ALIGN(po->tp_hdrlen +
                                       (maclen < 16 ? 16 : maclen)) +
                        po->tp_reserve;
                macoff = netoff - maclen;
        }
        if (po->tp_version <= TPACKET_V2) {
                if (macoff + snaplen > po->rx_ring.frame_size) {
                        if (po->copy_thresh &&
                            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                                if (skb_shared(skb)) {
                                        copy_skb = skb_clone(skb, GFP_ATOMIC);
                                } else {
                                        copy_skb = skb_get(skb);
                                        skb_head = skb->data;
                                }
                                if (copy_skb)
                                        skb_set_owner_r(copy_skb, sk);
                        }
                        snaplen = po->rx_ring.frame_size - macoff;
                        if ((int)snaplen < 0)
                                snaplen = 0;
                }
        } else if (unlikely(macoff + snaplen >
                            GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
                u32 nval;

                nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
                pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
                            snaplen, nval, macoff);
                snaplen = nval;
                if (unlikely((int)snaplen < 0)) {
                        snaplen = 0;
                        macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
                }
        }
        spin_lock(&sk->sk_receive_queue.lock);
        h.raw = packet_current_rx_frame(po, skb,
                                        TP_STATUS_KERNEL, (macoff+snaplen));
        if (!h.raw)
                goto ring_is_full;
        if (po->tp_version <= TPACKET_V2) {
                packet_increment_rx_head(po, &po->rx_ring);
        /*
         * LOSING will be reported until you read the stats,
         * because it's COR - Clear On Read.
         * Anyway, move it for V1/V2 only, as V3 doesn't need this
         * at the packet level.
         */
                if (po->stats.stats1.tp_drops)
                        status |= TP_STATUS_LOSING;
        }
        po->stats.stats1.tp_packets++;
        if (copy_skb) {
                status |= TP_STATUS_COPY;
                __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
        }
        spin_unlock(&sk->sk_receive_queue.lock);

        skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
                getnstimeofday(&ts);

        status |= ts_status;

        switch (po->tp_version) {
        case TPACKET_V1:
                h.h1->tp_len = skb->len;
                h.h1->tp_snaplen = snaplen;
                h.h1->tp_mac = macoff;
                h.h1->tp_net = netoff;
                h.h1->tp_sec = ts.tv_sec;
                h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
                hdrlen = sizeof(*h.h1);
                break;
        case TPACKET_V2:
                h.h2->tp_len = skb->len;
                h.h2->tp_snaplen = snaplen;
                h.h2->tp_mac = macoff;
                h.h2->tp_net = netoff;
                h.h2->tp_sec = ts.tv_sec;
                h.h2->tp_nsec = ts.tv_nsec;
                if (skb_vlan_tag_present(skb)) {
                        h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
                        h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
                        status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
                } else {
                        h.h2->tp_vlan_tci = 0;
                        h.h2->tp_vlan_tpid = 0;
                }
                memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
                hdrlen = sizeof(*h.h2);
                break;
        case TPACKET_V3:
                /* tp_next_offset and the VLAN fields are already populated
                 * above, so don't clear them here.
                 */
                h.h3->tp_status |= status;
                h.h3->tp_len = skb->len;
                h.h3->tp_snaplen = snaplen;
                h.h3->tp_mac = macoff;
                h.h3->tp_net = netoff;
                h.h3->tp_sec  = ts.tv_sec;
                h.h3->tp_nsec = ts.tv_nsec;
                memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
                hdrlen = sizeof(*h.h3);
                break;
        default:
                BUG();
        }

        sll = h.raw + TPACKET_ALIGN(hdrlen);
        sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
        sll->sll_family = AF_PACKET;
        sll->sll_hatype = dev->type;
        sll->sll_protocol = skb->protocol;
        sll->sll_pkttype = skb->pkt_type;
        if (unlikely(po->origdev))
                sll->sll_ifindex = orig_dev->ifindex;
        else
                sll->sll_ifindex = dev->ifindex;

        smp_mb();

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        if (po->tp_version <= TPACKET_V2) {
                u8 *start, *end;

                end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
                                        macoff + snaplen);

                for (start = h.raw; start < end; start += PAGE_SIZE)
                        flush_dcache_page(pgv_to_page(start));
        }
        smp_wmb();
#endif

        if (po->tp_version <= TPACKET_V2) {
                __packet_set_status(po, h.raw, status);
                sk->sk_data_ready(sk);
        } else {
                prb_clear_blk_fill_status(&po->rx_ring);
        }

drop_n_restore:
        if (skb_head != skb->data && skb_shared(skb)) {
                skb->data = skb_head;
                skb->len = skb_len;
        }
drop:
        kfree_skb(skb);
        return 0;

ring_is_full:
        po->stats.stats1.tp_drops++;
        spin_unlock(&sk->sk_receive_queue.lock);

        sk->sk_data_ready(sk);
        kfree_skb(copy_skb);
        goto drop_n_restore;
}
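
/*
 * Usage sketch (illustrative): the memory-mapped RX path that tpacket_rcv()
 * fills.  A TPACKET_V2 consumer sets the ring up once, then polls frame
 * status words instead of issuing a system call per packet:
 *
 *      struct tpacket_req req = {
 *              .tp_block_size  = 4096,         (example geometry)
 *              .tp_block_nr    = 64,
 *              .tp_frame_size  = 2048,
 *              .tp_frame_nr    = 128,          (block_nr * frames per block)
 *      };
 *      int ver = TPACKET_V2;
 *      setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *      setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *      void *ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *      struct tpacket2_hdr *hdr = ring;        (first frame slot)
 *      if (hdr->tp_status & TP_STATUS_USER) {
 *              ... consume hdr->tp_snaplen bytes at (u8 *)hdr + hdr->tp_mac ...
 *              hdr->tp_status = TP_STATUS_KERNEL;      (hand the slot back)
 *      }
 */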

static void tpacket_destruct_skb(struct sk_buff *skb)
{
        struct packet_sock *po = pkt_sk(skb->sk);

        if (likely(po->tx_ring.pg_vec)) {
                void *ph;
                __u32 ts;

                ph = skb_shinfo(skb)->destructor_arg;
                packet_dec_pending(&po->tx_ring);

                ts = __packet_set_timestamp(po, ph, skb);
                __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
        }

        sock_wfree(skb);
}

static bool ll_header_truncated(const struct net_device *dev, int len)
{
        /* net device doesn't like empty head */
        if (unlikely(len <= dev->hard_header_len)) {
                net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
                                     current->comm, len, dev->hard_header_len);
                return true;
        }

        return false;
}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
                void *frame, struct net_device *dev, int size_max,
                __be16 proto, unsigned char *addr, int hlen)
{
        union tpacket_uhdr ph;
        int to_write, offset, len, tp_len, nr_frags, len_max;
        struct socket *sock = po->sk.sk_socket;
        struct page *page;
        void *data;
        int err;

        ph.raw = frame;

        skb->protocol = proto;
        skb->dev = dev;
        skb->priority = po->sk.sk_priority;
        skb->mark = po->sk.sk_mark;
        sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
        skb_shinfo(skb)->destructor_arg = ph.raw;

        switch (po->tp_version) {
        case TPACKET_V2:
                tp_len = ph.h2->tp_len;
                break;
        default:
                tp_len = ph.h1->tp_len;
                break;
        }
        if (unlikely(tp_len > size_max)) {
                pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
                return -EMSGSIZE;
        }

        skb_reserve(skb, hlen);
        skb_reset_network_header(skb);

        if (!packet_use_direct_xmit(po))
                skb_probe_transport_header(skb, 0);
        if (unlikely(po->tp_tx_has_off)) {
                int off_min, off_max, off;
                off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
                off_max = po->tx_ring.frame_size - tp_len;
                if (sock->type == SOCK_DGRAM) {
                        switch (po->tp_version) {
                        case TPACKET_V2:
                                off = ph.h2->tp_net;
                                break;
                        default:
                                off = ph.h1->tp_net;
                                break;
                        }
                } else {
                        switch (po->tp_version) {
                        case TPACKET_V2:
                                off = ph.h2->tp_mac;
                                break;
                        default:
                                off = ph.h1->tp_mac;
                                break;
                        }
                }
                if (unlikely((off < off_min) || (off_max < off)))
                        return -EINVAL;
                data = ph.raw + off;
        } else {
                data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
        }
        to_write = tp_len;

        if (sock->type == SOCK_DGRAM) {
                err = dev_hard_header(skb, dev, ntohs(proto), addr,
                                NULL, tp_len);
                if (unlikely(err < 0))
                        return -EINVAL;
        } else if (dev->hard_header_len) {
                if (ll_header_truncated(dev, tp_len))
                        return -EINVAL;

                skb_push(skb, dev->hard_header_len);
                err = skb_store_bits(skb, 0, data,
                                dev->hard_header_len);
                if (unlikely(err))
                        return err;

                data += dev->hard_header_len;
                to_write -= dev->hard_header_len;
        }

        offset = offset_in_page(data);
        len_max = PAGE_SIZE - offset;
        len = ((to_write > len_max) ? len_max : to_write);

        skb->data_len = to_write;
        skb->len += to_write;
        skb->truesize += to_write;
        atomic_add(to_write, &po->sk.sk_wmem_alloc);

        while (likely(to_write)) {
                nr_frags = skb_shinfo(skb)->nr_frags;

                if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
                        pr_err("packet exceeds the number of skb frags (%lu)\n",
                               MAX_SKB_FRAGS);
                        return -EFAULT;
                }

                page = pgv_to_page(data);
                data += len;
                flush_dcache_page(page);
                get_page(page);
                skb_fill_page_desc(skb, nr_frags, page, offset, len);
                to_write -= len;
                offset = 0;
                len_max = PAGE_SIZE;
                len = ((to_write > len_max) ? len_max : to_write);
        }

        return tp_len;
}

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
        struct sk_buff *skb;
        struct net_device *dev;
        __be16 proto;
        int err, reserve = 0;
        void *ph;
        DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
        bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
        int tp_len, size_max;
        unsigned char *addr;
        int len_sum = 0;
        int status = TP_STATUS_AVAILABLE;
        int hlen, tlen;

        mutex_lock(&po->pg_vec_lock);

        if (likely(saddr == NULL)) {
                dev     = packet_cached_dev_get(po);
                proto   = po->num;
                addr    = NULL;
        } else {
                err = -EINVAL;
                if (msg->msg_namelen < sizeof(struct sockaddr_ll))
                        goto out;
                if (msg->msg_namelen < (saddr->sll_halen
                                        + offsetof(struct sockaddr_ll,
                                                sll_addr)))
                        goto out;
                proto   = saddr->sll_protocol;
                addr    = saddr->sll_addr;
                dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
        }

        err = -ENXIO;
        if (unlikely(dev == NULL))
                goto out;
        err = -ENETDOWN;
        if (unlikely(!(dev->flags & IFF_UP)))
                goto out_put;

        reserve = dev->hard_header_len + VLAN_HLEN;
        size_max = po->tx_ring.frame_size
                - (po->tp_hdrlen - sizeof(struct sockaddr_ll));

        if (size_max > dev->mtu + reserve)
                size_max = dev->mtu + reserve;

        do {
                ph = packet_current_frame(po, &po->tx_ring,
                                          TP_STATUS_SEND_REQUEST);
                if (unlikely(ph == NULL)) {
                        if (need_wait && need_resched())
                                schedule();
                        continue;
                }

                status = TP_STATUS_SEND_REQUEST;
                hlen = LL_RESERVED_SPACE(dev);
                tlen = dev->needed_tailroom;
                skb = sock_alloc_send_skb(&po->sk,
                                hlen + tlen + sizeof(struct sockaddr_ll),
                                !need_wait, &err);

                if (unlikely(skb == NULL)) {
                        /* we assume the socket was initially writable ... */
                        if (likely(len_sum > 0))
                                err = len_sum;
                        goto out_status;
                }
                tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
                                          addr, hlen);
                if (likely(tp_len >= 0) &&
                    tp_len > dev->mtu + dev->hard_header_len) {
                        struct ethhdr *ehdr;
                        /* Earlier code assumed this would be a VLAN pkt,
                         * double-check this now that we have the actual
                         * packet in hand.
                         */

                        skb_reset_mac_header(skb);
                        ehdr = eth_hdr(skb);
                        if (ehdr->h_proto != htons(ETH_P_8021Q))
                                tp_len = -EMSGSIZE;
                }
                if (unlikely(tp_len < 0)) {
                        if (po->tp_loss) {
                                __packet_set_status(po, ph,
                                                TP_STATUS_AVAILABLE);
                                packet_increment_head(&po->tx_ring);
                                kfree_skb(skb);
                                continue;
                        } else {
                                status = TP_STATUS_WRONG_FORMAT;
                                err = tp_len;
                                goto out_status;
                        }
                }

                packet_pick_tx_queue(dev, skb);

                skb->destructor = tpacket_destruct_skb;
                __packet_set_status(po, ph, TP_STATUS_SENDING);
                packet_inc_pending(&po->tx_ring);

                status = TP_STATUS_SEND_REQUEST;
                err = po->xmit(skb);
                if (unlikely(err > 0)) {
                        err = net_xmit_errno(err);
                        if (err && __packet_get_status(po, ph) ==
                                   TP_STATUS_AVAILABLE) {
                                /* skb was destructed already */
                                skb = NULL;
                                goto out_status;
                        }
                        /*
                         * skb was dropped but not destructed yet;
                         * let's treat it like congestion or err < 0
                         */
                        err = 0;
                }
                packet_increment_head(&po->tx_ring);
                len_sum += tp_len;
        } while (likely((ph != NULL) ||
                /* Note: packet_read_pending() might be slow if we
                 * have to call it, as it's a per-CPU variable, but in
                 * the fast path we already short-circuit the loop with
                 * the first condition and luckily don't have to take
                 * that path anyway.
                 */
                 (need_wait && packet_read_pending(&po->tx_ring))));

        err = len_sum;
        goto out_put;

out_status:
        __packet_set_status(po, ph, status);
        kfree_skb(skb);
out_put:
        dev_put(dev);
out:
        mutex_unlock(&po->pg_vec_lock);
        return err;
}
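
/*
 * Usage sketch (illustrative): the TX-ring counterpart of the loop above.
 * Userspace fills a slot, marks it TP_STATUS_SEND_REQUEST and kicks
 * tpacket_snd() with a plain send():
 *
 *      setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
 *      struct tpacket2_hdr *hdr = next_free_slot;      (from the mmap()ed ring)
 *      memcpy((u8 *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll),
 *             frame, frame_len);
 *      hdr->tp_len = frame_len;
 *      hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *      send(fd, NULL, 0, 0);           (or MSG_DONTWAIT to avoid blocking)
 *
 * tpacket_destruct_skb() later flips the slot back to TP_STATUS_AVAILABLE.
 */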

static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
                                        size_t reserve, size_t len,
                                        size_t linear, int noblock,
                                        int *err)
{
        struct sk_buff *skb;

        /* Under a page?  Don't bother with paged skb. */
        if (prepad + len < PAGE_SIZE || !linear)
                linear = len;

        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
                                   err, 0);
        if (!skb)
                return NULL;

        skb_reserve(skb, reserve);
        skb_put(skb, linear);
        skb->data_len = len - linear;
        skb->len += len - linear;

        return skb;
}

static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
        struct sk_buff *skb;
        struct net_device *dev;
        __be16 proto;
        unsigned char *addr;
        int err, reserve = 0;
        struct sockcm_cookie sockc;
        struct virtio_net_hdr vnet_hdr = { 0 };
        int offset = 0;
        int vnet_hdr_len;
        struct packet_sock *po = pkt_sk(sk);
        unsigned short gso_type = 0;
        int hlen, tlen;
        int extra_len = 0;
        ssize_t n;

        /*
         *      Get and verify the address.
         */

        if (likely(saddr == NULL)) {
                dev     = packet_cached_dev_get(po);
                proto   = po->num;
                addr    = NULL;
        } else {
                err = -EINVAL;
                if (msg->msg_namelen < sizeof(struct sockaddr_ll))
                        goto out;
                if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
                        goto out;
                proto   = saddr->sll_protocol;
                addr    = saddr->sll_addr;
                dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
        }

        err = -ENXIO;
        if (unlikely(dev == NULL))
                goto out_unlock;
        err = -ENETDOWN;
        if (unlikely(!(dev->flags & IFF_UP)))
                goto out_unlock;

        sockc.mark = sk->sk_mark;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
                if (unlikely(err))
                        goto out_unlock;
        }

        if (sock->type == SOCK_RAW)
                reserve = dev->hard_header_len;
        if (po->has_vnet_hdr) {
                vnet_hdr_len = sizeof(vnet_hdr);

                err = -EINVAL;
                if (len < vnet_hdr_len)
                        goto out_unlock;

                len -= vnet_hdr_len;

                err = -EFAULT;
                n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
                if (n != vnet_hdr_len)
                        goto out_unlock;

                if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
                    (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
                     __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 >
                      __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len)))
                        vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(),
                                 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
                                __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2);

                err = -EINVAL;
                if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len)
                        goto out_unlock;

                if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                        switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                        case VIRTIO_NET_HDR_GSO_TCPV4:
                                gso_type = SKB_GSO_TCPV4;
                                break;
                        case VIRTIO_NET_HDR_GSO_TCPV6:
                                gso_type = SKB_GSO_TCPV6;
                                break;
                        case VIRTIO_NET_HDR_GSO_UDP:
                                gso_type = SKB_GSO_UDP;
                                break;
                        default:
                                goto out_unlock;
                        }

                        if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                                gso_type |= SKB_GSO_TCP_ECN;

                        if (vnet_hdr.gso_size == 0)
                                goto out_unlock;

                }
        }

        if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
                if (!netif_supports_nofcs(dev)) {
                        err = -EPROTONOSUPPORT;
                        goto out_unlock;
                }
                extra_len = 4; /* We're doing our own CRC */
        }

        err = -EMSGSIZE;
        if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
                goto out_unlock;

        err = -ENOBUFS;
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
                               __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
                               msg->msg_flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto out_unlock;

        skb_set_network_header(skb, reserve);

        err = -EINVAL;
        if (sock->type == SOCK_DGRAM) {
                offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
                if (unlikely(offset < 0))
                        goto out_free;
        } else {
                if (ll_header_truncated(dev, len))
                        goto out_free;
        }

        /* Returns -EFAULT on error */
        err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
        if (err)
                goto out_free;

        sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

        if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
                /* Earlier code assumed this would be a VLAN pkt,
                 * double-check this now that we have the actual
                 * packet in hand.
                 */
                struct ethhdr *ehdr;
                skb_reset_mac_header(skb);
                ehdr = eth_hdr(skb);
                if (ehdr->h_proto != htons(ETH_P_8021Q)) {
                        err = -EMSGSIZE;
                        goto out_free;
                }
        }

        skb->protocol = proto;
        skb->dev = dev;
        skb->priority = sk->sk_priority;
        skb->mark = sockc.mark;

        packet_pick_tx_queue(dev, skb);

        if (po->has_vnet_hdr) {
                if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                        u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
                        u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
                        if (!skb_partial_csum_set(skb, s, o)) {
                                err = -EINVAL;
                                goto out_free;
                        }
                }

                skb_shinfo(skb)->gso_size =
                        __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size);
                skb_shinfo(skb)->gso_type = gso_type;

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;

                len += vnet_hdr_len;
        }

        if (!packet_use_direct_xmit(po))
                skb_probe_transport_header(skb, reserve);
        if (unlikely(extra_len == 4))
                skb->no_fcs = 1;

        err = po->xmit(skb);
        if (err > 0 && (err = net_xmit_errno(err)) != 0)
                goto out_unlock;

        dev_put(dev);

        return len;

out_free:
        kfree_skb(skb);
out_unlock:
        if (dev)
                dev_put(dev);
out:
        return err;
}
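
/*
 * Usage sketch (illustrative): the copy-mode TX path above, driven from a
 * SOCK_DGRAM packet socket, where dev_hard_header() builds the link-level
 * header from the sockaddr_ll destination:
 *
 *      struct sockaddr_ll sll = {
 *              .sll_family   = AF_PACKET,
 *              .sll_ifindex  = ifindex,        (e.g. from if_nametoindex())
 *              .sll_protocol = htons(ETH_P_IP),
 *              .sll_halen    = ETH_ALEN,
 *      };
 *      memcpy(sll.sll_addr, dest_mac, ETH_ALEN);
 *      sendto(fd, payload, payload_len, 0,
 *             (struct sockaddr *)&sll, sizeof(sll));
 */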

static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);

        if (po->tx_ring.pg_vec)
                return tpacket_snd(po, msg);
        else
                return packet_snd(sock, msg, len);
}

/*
 *      Close a PACKET socket. This is fairly simple. We immediately go
 *      to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct packet_sock *po;
        struct net *net;
        union tpacket_req_u req_u;

        if (!sk)
                return 0;

        net = sock_net(sk);
        po = pkt_sk(sk);

        mutex_lock(&net->packet.sklist_lock);
        sk_del_node_init_rcu(sk);
        mutex_unlock(&net->packet.sklist_lock);

        preempt_disable();
        sock_prot_inuse_add(net, sk->sk_prot, -1);
        preempt_enable();

        spin_lock(&po->bind_lock);
        unregister_prot_hook(sk, false);
        packet_cached_dev_reset(po);

        if (po->prot_hook.dev) {
                dev_put(po->prot_hook.dev);
                po->prot_hook.dev = NULL;
        }
        spin_unlock(&po->bind_lock);

        packet_flush_mclist(sk);

        if (po->rx_ring.pg_vec) {
                memset(&req_u, 0, sizeof(req_u));
                packet_set_ring(sk, &req_u, 1, 0);
        }

        if (po->tx_ring.pg_vec) {
                memset(&req_u, 0, sizeof(req_u));
                packet_set_ring(sk, &req_u, 1, 1);
        }

        fanout_release(sk);

        synchronize_net();
        /*
         *      Now the socket is dead. No more input will appear.
         */
        sock_orphan(sk);
        sock->sk = NULL;

        /* Purge queues */

        skb_queue_purge(&sk->sk_receive_queue);
        packet_free_pending(po);
        sk_refcnt_debug_release(sk);

        sock_put(sk);
        return 0;
}

/*
 *      Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
                          __be16 proto)
{
        struct packet_sock *po = pkt_sk(sk);
        struct net_device *dev_curr;
        __be16 proto_curr;
        bool need_rehook;
        struct net_device *dev = NULL;
        int ret = 0;
        bool unlisted = false;

        if (po->fanout)
                return -EINVAL;

        lock_sock(sk);
        spin_lock(&po->bind_lock);
        rcu_read_lock();

        if (name) {
                dev = dev_get_by_name_rcu(sock_net(sk), name);
                if (!dev) {
                        ret = -ENODEV;
                        goto out_unlock;
                }
        } else if (ifindex) {
                dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
                if (!dev) {
                        ret = -ENODEV;
                        goto out_unlock;
                }
        }

        if (dev)
                dev_hold(dev);

        proto_curr = po->prot_hook.type;
        dev_curr = po->prot_hook.dev;

        need_rehook = proto_curr != proto || dev_curr != dev;

        if (need_rehook) {
                if (po->running) {
                        rcu_read_unlock();
                        __unregister_prot_hook(sk, true);
                        rcu_read_lock();
                        dev_curr = po->prot_hook.dev;
                        if (dev)
                                unlisted = !dev_get_by_index_rcu(sock_net(sk),
                                                                 dev->ifindex);
                }

                po->num = proto;
                po->prot_hook.type = proto;

                if (unlikely(unlisted)) {
                        dev_put(dev);
                        po->prot_hook.dev = NULL;
                        po->ifindex = -1;
                        packet_cached_dev_reset(po);
                } else {
                        po->prot_hook.dev = dev;
                        po->ifindex = dev ? dev->ifindex : 0;
                        packet_cached_dev_assign(po, dev);
                }
        }
        if (dev_curr)
                dev_put(dev_curr);

        if (proto == 0 || !need_rehook)
                goto out_unlock;

        if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
                register_prot_hook(sk);
        } else {
                sk->sk_err = ENETDOWN;
                if (!sock_flag(sk, SOCK_DEAD))
                        sk->sk_error_report(sk);
        }

out_unlock:
        rcu_read_unlock();
        spin_unlock(&po->bind_lock);
        release_sock(sk);
        return ret;
}

/*
 *      Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
                            int addr_len)
{
        struct sock *sk = sock->sk;
        char name[15];

        /*
         *      Check legality
         */

        if (addr_len != sizeof(struct sockaddr))
                return -EINVAL;
        strlcpy(name, uaddr->sa_data, sizeof(name));

        return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
        struct sock *sk = sock->sk;

        /*
         *      Check legality
         */

        if (addr_len < sizeof(struct sockaddr_ll))
                return -EINVAL;
        if (sll->sll_family != AF_PACKET)
                return -EINVAL;

        return packet_do_bind(sk, NULL, sll->sll_ifindex,
                              sll->sll_protocol ? : pkt_sk(sk)->num);
}
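
/*
 * Usage sketch (illustrative): binding an AF_PACKET socket to a single
 * device, which funnels into packet_do_bind() above:
 *
 *      struct sockaddr_ll sll = {
 *              .sll_family   = AF_PACKET,
 *              .sll_protocol = htons(ETH_P_ALL),
 *              .sll_ifindex  = if_nametoindex("eth0"), (example device)
 *      };
 *      bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */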
3038
3039 static struct proto packet_proto = {
3040         .name     = "PACKET",
3041         .owner    = THIS_MODULE,
3042         .obj_size = sizeof(struct packet_sock),
3043 };
3044
3045 /*
3046  *      Create a packet of type SOCK_PACKET.
3047  */
3048
3049 static int packet_create(struct net *net, struct socket *sock, int protocol,
3050                          int kern)
3051 {
3052         struct sock *sk;
3053         struct packet_sock *po;
3054         __be16 proto = (__force __be16)protocol; /* weird, but documented */
3055         int err;
3056
3057         if (!ns_capable(net->user_ns, CAP_NET_RAW))
3058                 return -EPERM;
3059         if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3060             sock->type != SOCK_PACKET)
3061                 return -ESOCKTNOSUPPORT;
3062
3063         sock->state = SS_UNCONNECTED;
3064
3065         err = -ENOBUFS;
3066         sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3067         if (sk == NULL)
3068                 goto out;
3069
3070         sock->ops = &packet_ops;
3071         if (sock->type == SOCK_PACKET)
3072                 sock->ops = &packet_ops_spkt;
3073
3074         sock_init_data(sock, sk);
3075
3076         po = pkt_sk(sk);
3077         sk->sk_family = PF_PACKET;
3078         po->num = proto;
3079         po->xmit = dev_queue_xmit;
3080
3081         err = packet_alloc_pending(po);
3082         if (err)
3083                 goto out2;
3084
3085         packet_cached_dev_reset(po);
3086
3087         sk->sk_destruct = packet_sock_destruct;
3088         sk_refcnt_debug_inc(sk);
3089
3090         /*
3091          *      Attach a protocol block
3092          */
3093
3094         spin_lock_init(&po->bind_lock);
3095         mutex_init(&po->pg_vec_lock);
3096         po->rollover = NULL;
3097         po->prot_hook.func = packet_rcv;
3098
3099         if (sock->type == SOCK_PACKET)
3100                 po->prot_hook.func = packet_rcv_spkt;
3101
3102         po->prot_hook.af_packet_priv = sk;
3103
3104         if (proto) {
3105                 po->prot_hook.type = proto;
3106                 register_prot_hook(sk);
3107         }
3108
3109         mutex_lock(&net->packet.sklist_lock);
3110         sk_add_node_rcu(sk, &net->packet.sklist);
3111         mutex_unlock(&net->packet.sklist_lock);
3112
3113         preempt_disable();
3114         sock_prot_inuse_add(net, &packet_proto, 1);
3115         preempt_enable();
3116
3117         return 0;
3118 out2:
3119         sk_free(sk);
3120 out:
3121         return err;
3122 }
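/*
 *	Userspace sketch (not kernel code): the three socket types accepted
 *	above. CAP_NET_RAW is required; any other type fails with
 *	ESOCKTNOSUPPORT. With protocol 0 no hook is registered, so no packets
 *	arrive until the socket is bound to a protocol.
 *
 *		#include <arpa/inet.h>
 *		#include <linux/if_ether.h>
 *		#include <linux/if_packet.h>
 *		#include <sys/socket.h>
 *
 *		int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *		int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *		int idle = socket(AF_PACKET, SOCK_RAW,   0);
 */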
3123
3124 /*
3125  *      Pull a packet from our receive queue and hand it to the user.
3126  *      If necessary we block.
3127  */
3128
3129 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3130                           int flags)
3131 {
3132         struct sock *sk = sock->sk;
3133         struct sk_buff *skb;
3134         int copied, err;
3135         int vnet_hdr_len = 0;
3136         unsigned int origlen = 0;
3137
3138         err = -EINVAL;
3139         if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3140                 goto out;
3141
3142 #if 0
3143         /* What error should we return now? EUNATTACH? */
3144         if (pkt_sk(sk)->ifindex < 0)
3145                 return -ENODEV;
3146 #endif
3147
3148         if (flags & MSG_ERRQUEUE) {
3149                 err = sock_recv_errqueue(sk, msg, len,
3150                                          SOL_PACKET, PACKET_TX_TIMESTAMP);
3151                 goto out;
3152         }
3153
3154         /*
3155          *      Call the generic datagram receiver. This handles all sorts
3156          *      of horrible races and re-entrancy so we can forget about it
3157          *      in the protocol layers.
3158          *
3159  *      It will return ENETDOWN if the device has just gone down,
3160  *      but then it will block.
3161          */
3162
3163         skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3164
3165         /*
3166  *      An error occurred, so return it. Because skb_recv_datagram()
3167  *      handles the blocking for us, we need not see or worry about
3168  *      blocking retries.
3169          */
3170
3171         if (skb == NULL)
3172                 goto out;
3173
3174         if (pkt_sk(sk)->pressure)
3175                 packet_rcv_has_room(pkt_sk(sk), NULL);
3176
3177         if (pkt_sk(sk)->has_vnet_hdr) {
3178                 struct virtio_net_hdr vnet_hdr = { 0 };
3179
3180                 err = -EINVAL;
3181                 vnet_hdr_len = sizeof(vnet_hdr);
3182                 if (len < vnet_hdr_len)
3183                         goto out_free;
3184
3185                 len -= vnet_hdr_len;
3186
3187                 if (skb_is_gso(skb)) {
3188                         struct skb_shared_info *sinfo = skb_shinfo(skb);
3189
3190                         /* This is a hint as to how much should be linear. */
3191                         vnet_hdr.hdr_len =
3192                                 __cpu_to_virtio16(vio_le(), skb_headlen(skb));
3193                         vnet_hdr.gso_size =
3194                                 __cpu_to_virtio16(vio_le(), sinfo->gso_size);
3195                         if (sinfo->gso_type & SKB_GSO_TCPV4)
3196                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
3197                         else if (sinfo->gso_type & SKB_GSO_TCPV6)
3198                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
3199                         else if (sinfo->gso_type & SKB_GSO_UDP)
3200                                 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
3201                         else if (sinfo->gso_type & SKB_GSO_FCOE)
3202                                 goto out_free;
3203                         else
3204                                 BUG();
3205                         if (sinfo->gso_type & SKB_GSO_TCP_ECN)
3206                                 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
3207                 } else
3208                         vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
3209
3210                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3211                         vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
3212                         vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(),
3213                                           skb_checksum_start_offset(skb));
3214                         vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(),
3215                                                          skb->csum_offset);
3216                 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3217                         vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
3218                 } /* else everything is zero */
3219
3220                 err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
3221                 if (err < 0)
3222                         goto out_free;
3223         }
3224
3225         /* Any data beyond the buffer the user gave is lost. If that
3226          * worries a user program, it can always ask the device for
3227          * its MTU.
3228          */
3229         copied = skb->len;
3230         if (copied > len) {
3231                 copied = len;
3232                 msg->msg_flags |= MSG_TRUNC;
3233         }
3234
3235         err = skb_copy_datagram_msg(skb, 0, msg, copied);
3236         if (err)
3237                 goto out_free;
3238
3239         if (sock->type != SOCK_PACKET) {
3240                 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3241
3242                 /* Original length was stored in sockaddr_ll fields */
3243                 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3244                 sll->sll_family = AF_PACKET;
3245                 sll->sll_protocol = skb->protocol;
3246         }
3247
3248         sock_recv_ts_and_drops(msg, sk, skb);
3249
3250         if (msg->msg_name) {
3251                 /* If the address length field is there to be filled
3252                  * in, we fill it in now.
3253                  */
3254                 if (sock->type == SOCK_PACKET) {
3255                         __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3256                         msg->msg_namelen = sizeof(struct sockaddr_pkt);
3257                 } else {
3258                         struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3259
3260                         msg->msg_namelen = sll->sll_halen +
3261                                 offsetof(struct sockaddr_ll, sll_addr);
3262                 }
3263                 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3264                        msg->msg_namelen);
3265         }
3266
3267         if (pkt_sk(sk)->auxdata) {
3268                 struct tpacket_auxdata aux;
3269
3270                 aux.tp_status = TP_STATUS_USER;
3271                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3272                         aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3273                 else if (skb->pkt_type != PACKET_OUTGOING &&
3274                          (skb->ip_summed == CHECKSUM_COMPLETE ||
3275                           skb_csum_unnecessary(skb)))
3276                         aux.tp_status |= TP_STATUS_CSUM_VALID;
3277
3278                 aux.tp_len = origlen;
3279                 aux.tp_snaplen = skb->len;
3280                 aux.tp_mac = 0;
3281                 aux.tp_net = skb_network_offset(skb);
3282                 if (skb_vlan_tag_present(skb)) {
3283                         aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3284                         aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3285                         aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3286                 } else {
3287                         aux.tp_vlan_tci = 0;
3288                         aux.tp_vlan_tpid = 0;
3289                 }
3290                 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3291         }
3292
3293         /*
3294          *      Free or return the buffer as appropriate. Again this
3295          *      hides all the races and re-entrancy issues from us.
3296          */
3297         err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3298
3299 out_free:
3300         skb_free_datagram(sk, skb);
3301 out:
3302         return err;
3303 }
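/*
 *	Userspace sketch (not kernel code): receiving one frame plus the
 *	tpacket_auxdata control message built above when PACKET_AUXDATA is
 *	enabled. The 2048-byte buffer is an illustrative assumption.
 *
 *		#include <linux/if_packet.h>
 *		#include <string.h>
 *		#include <sys/socket.h>
 *		#include <sys/uio.h>
 *
 *		static void read_one(int fd)
 *		{
 *			char frame[2048];
 *			char ctrl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *			struct iovec iov = { frame, sizeof(frame) };
 *			struct msghdr msg = { 0 };
 *			struct cmsghdr *c;
 *
 *			msg.msg_iov = &iov;
 *			msg.msg_iovlen = 1;
 *			msg.msg_control = ctrl;
 *			msg.msg_controllen = sizeof(ctrl);
 *			if (recvmsg(fd, &msg, 0) < 0)
 *				return;
 *			for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
 *				struct tpacket_auxdata aux;
 *
 *				if (c->cmsg_level != SOL_PACKET ||
 *				    c->cmsg_type != PACKET_AUXDATA)
 *					continue;
 *				memcpy(&aux, CMSG_DATA(c), sizeof(aux));
 *				// aux.tp_len: wire length; aux.tp_snaplen: copied
 *			}
 *		}
 */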
3304
3305 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3306                                int *uaddr_len, int peer)
3307 {
3308         struct net_device *dev;
3309         struct sock *sk = sock->sk;
3310
3311         if (peer)
3312                 return -EOPNOTSUPP;
3313
3314         uaddr->sa_family = AF_PACKET;
3315         memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3316         rcu_read_lock();
3317         dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3318         if (dev)
3319                 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3320         rcu_read_unlock();
3321         *uaddr_len = sizeof(*uaddr);
3322
3323         return 0;
3324 }
3325
3326 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3327                           int *uaddr_len, int peer)
3328 {
3329         struct net_device *dev;
3330         struct sock *sk = sock->sk;
3331         struct packet_sock *po = pkt_sk(sk);
3332         DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3333
3334         if (peer)
3335                 return -EOPNOTSUPP;
3336
3337         sll->sll_family = AF_PACKET;
3338         sll->sll_ifindex = po->ifindex;
3339         sll->sll_protocol = po->num;
3340         sll->sll_pkttype = 0;
3341         rcu_read_lock();
3342         dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3343         if (dev) {
3344                 sll->sll_hatype = dev->type;
3345                 sll->sll_halen = dev->addr_len;
3346                 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3347         } else {
3348                 sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
3349                 sll->sll_halen = 0;
3350         }
3351         rcu_read_unlock();
3352         *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3353
3354         return 0;
3355 }
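/*
 *	Userspace sketch (not kernel code): reading back the binding this
 *	function reports. fd is assumed to be a bound AF_PACKET socket.
 *
 *		#include <arpa/inet.h>
 *		#include <linux/if_packet.h>
 *		#include <stdio.h>
 *		#include <sys/socket.h>
 *
 *		struct sockaddr_ll sll;
 *		socklen_t len = sizeof(sll);
 *
 *		if (getsockname(fd, (struct sockaddr *)&sll, &len) == 0)
 *			printf("ifindex %d proto 0x%04x halen %u\n",
 *			       sll.sll_ifindex, ntohs(sll.sll_protocol),
 *			       sll.sll_halen);
 */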
3356
3357 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3358                          int what)
3359 {
3360         switch (i->type) {
3361         case PACKET_MR_MULTICAST:
3362                 if (i->alen != dev->addr_len)
3363                         return -EINVAL;
3364                 if (what > 0)
3365                         return dev_mc_add(dev, i->addr);
3366                 else
3367                         return dev_mc_del(dev, i->addr);
3368                 break;
3369         case PACKET_MR_PROMISC:
3370                 return dev_set_promiscuity(dev, what);
3371         case PACKET_MR_ALLMULTI:
3372                 return dev_set_allmulti(dev, what);
3373         case PACKET_MR_UNICAST:
3374                 if (i->alen != dev->addr_len)
3375                         return -EINVAL;
3376                 if (what > 0)
3377                         return dev_uc_add(dev, i->addr);
3378                 else
3379                         return dev_uc_del(dev, i->addr);
3380                 break;
3381         default:
3382                 break;
3383         }
3384         return 0;
3385 }
3386
3387 static void packet_dev_mclist_delete(struct net_device *dev,
3388                                      struct packet_mclist **mlp)
3389 {
3390         struct packet_mclist *ml;
3391
3392         while ((ml = *mlp) != NULL) {
3393                 if (ml->ifindex == dev->ifindex) {
3394                         packet_dev_mc(dev, ml, -1);
3395                         *mlp = ml->next;
3396                         kfree(ml);
3397                 } else
3398                         mlp = &ml->next;
3399         }
3400 }
3401
3402 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3403 {
3404         struct packet_sock *po = pkt_sk(sk);
3405         struct packet_mclist *ml, *i;
3406         struct net_device *dev;
3407         int err;
3408
3409         rtnl_lock();
3410
3411         err = -ENODEV;
3412         dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3413         if (!dev)
3414                 goto done;
3415
3416         err = -EINVAL;
3417         if (mreq->mr_alen > dev->addr_len)
3418                 goto done;
3419
3420         err = -ENOBUFS;
3421         i = kmalloc(sizeof(*i), GFP_KERNEL);
3422         if (i == NULL)
3423                 goto done;
3424
3425         err = 0;
3426         for (ml = po->mclist; ml; ml = ml->next) {
3427                 if (ml->ifindex == mreq->mr_ifindex &&
3428                     ml->type == mreq->mr_type &&
3429                     ml->alen == mreq->mr_alen &&
3430                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3431                         ml->count++;
3432                         /* Free the new element ... */
3433                         kfree(i);
3434                         goto done;
3435                 }
3436         }
3437
3438         i->type = mreq->mr_type;
3439         i->ifindex = mreq->mr_ifindex;
3440         i->alen = mreq->mr_alen;
3441         memcpy(i->addr, mreq->mr_address, i->alen);
3442         i->count = 1;
3443         i->next = po->mclist;
3444         po->mclist = i;
3445         err = packet_dev_mc(dev, i, 1);
3446         if (err) {
3447                 po->mclist = i->next;
3448                 kfree(i);
3449         }
3450
3451 done:
3452         rtnl_unlock();
3453         return err;
3454 }
3455
3456 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3457 {
3458         struct packet_mclist *ml, **mlp;
3459
3460         rtnl_lock();
3461
3462         for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3463                 if (ml->ifindex == mreq->mr_ifindex &&
3464                     ml->type == mreq->mr_type &&
3465                     ml->alen == mreq->mr_alen &&
3466                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3467                         if (--ml->count == 0) {
3468                                 struct net_device *dev;
3469                                 *mlp = ml->next;
3470                                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3471                                 if (dev)
3472                                         packet_dev_mc(dev, ml, -1);
3473                                 kfree(ml);
3474                         }
3475                         break;
3476                 }
3477         }
3478         rtnl_unlock();
3479         return 0;
3480 }
3481
3482 static void packet_flush_mclist(struct sock *sk)
3483 {
3484         struct packet_sock *po = pkt_sk(sk);
3485         struct packet_mclist *ml;
3486
3487         if (!po->mclist)
3488                 return;
3489
3490         rtnl_lock();
3491         while ((ml = po->mclist) != NULL) {
3492                 struct net_device *dev;
3493
3494                 po->mclist = ml->next;
3495                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3496                 if (dev != NULL)
3497                         packet_dev_mc(dev, ml, -1);
3498                 kfree(ml);
3499         }
3500         rtnl_unlock();
3501 }
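/*
 *	Userspace sketch (not kernel code): taking a PACKET_MR_PROMISC
 *	membership through packet_setsockopt() below, which lands in
 *	packet_mc_add() above. "eth0" and fd are illustrative assumptions;
 *	the interface stays promiscuous until the membership is dropped or
 *	the socket is closed.
 *
 *		#include <linux/if_packet.h>
 *		#include <net/if.h>
 *		#include <string.h>
 *		#include <sys/socket.h>
 *
 *		struct packet_mreq mreq;
 *
 *		memset(&mreq, 0, sizeof(mreq));
 *		mreq.mr_ifindex = if_nametoindex("eth0");
 *		mreq.mr_type    = PACKET_MR_PROMISC;
 *		setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *			   &mreq, sizeof(mreq));
 */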
3502
3503 static int
3504 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3505 {
3506         struct sock *sk = sock->sk;
3507         struct packet_sock *po = pkt_sk(sk);
3508         int ret;
3509
3510         if (level != SOL_PACKET)
3511                 return -ENOPROTOOPT;
3512
3513         switch (optname) {
3514         case PACKET_ADD_MEMBERSHIP:
3515         case PACKET_DROP_MEMBERSHIP:
3516         {
3517                 struct packet_mreq_max mreq;
3518                 int len = optlen;
3519                 memset(&mreq, 0, sizeof(mreq));
3520                 if (len < sizeof(struct packet_mreq))
3521                         return -EINVAL;
3522                 if (len > sizeof(mreq))
3523                         len = sizeof(mreq);
3524                 if (copy_from_user(&mreq, optval, len))
3525                         return -EFAULT;
3526                 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3527                         return -EINVAL;
3528                 if (optname == PACKET_ADD_MEMBERSHIP)
3529                         ret = packet_mc_add(sk, &mreq);
3530                 else
3531                         ret = packet_mc_drop(sk, &mreq);
3532                 return ret;
3533         }
3534
3535         case PACKET_RX_RING:
3536         case PACKET_TX_RING:
3537         {
3538                 union tpacket_req_u req_u;
3539                 int len;
3540
3541                 switch (po->tp_version) {
3542                 case TPACKET_V1:
3543                 case TPACKET_V2:
3544                         len = sizeof(req_u.req);
3545                         break;
3546                 case TPACKET_V3:
3547                 default:
3548                         len = sizeof(req_u.req3);
3549                         break;
3550                 }
3551                 if (optlen < len)
3552                         return -EINVAL;
3553                 if (pkt_sk(sk)->has_vnet_hdr)
3554                         return -EINVAL;
3555                 if (copy_from_user(&req_u.req, optval, len))
3556                         return -EFAULT;
3557                 return packet_set_ring(sk, &req_u, 0,
3558                         optname == PACKET_TX_RING);
3559         }
3560         case PACKET_COPY_THRESH:
3561         {
3562                 int val;
3563
3564                 if (optlen != sizeof(val))
3565                         return -EINVAL;
3566                 if (copy_from_user(&val, optval, sizeof(val)))
3567                         return -EFAULT;
3568
3569                 pkt_sk(sk)->copy_thresh = val;
3570                 return 0;
3571         }
3572         case PACKET_VERSION:
3573         {
3574                 int val;
3575
3576                 if (optlen != sizeof(val))
3577                         return -EINVAL;
3578                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3579                         return -EBUSY;
3580                 if (copy_from_user(&val, optval, sizeof(val)))
3581                         return -EFAULT;
3582                 switch (val) {
3583                 case TPACKET_V1:
3584                 case TPACKET_V2:
3585                 case TPACKET_V3:
3586                         po->tp_version = val;
3587                         return 0;
3588                 default:
3589                         return -EINVAL;
3590                 }
3591         }
3592         case PACKET_RESERVE:
3593         {
3594                 unsigned int val;
3595
3596                 if (optlen != sizeof(val))
3597                         return -EINVAL;
3598                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3599                         return -EBUSY;
3600                 if (copy_from_user(&val, optval, sizeof(val)))
3601                         return -EFAULT;
3602                 if (val > INT_MAX)
3603                         return -EINVAL;
3604                 po->tp_reserve = val;
3603                 return 0;
3604         }
3605         case PACKET_LOSS:
3606         {
3607                 unsigned int val;
3608
3609                 if (optlen != sizeof(val))
3610                         return -EINVAL;
3611                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3612                         return -EBUSY;
3613                 if (copy_from_user(&val, optval, sizeof(val)))
3614                         return -EFAULT;
3615                 po->tp_loss = !!val;
3616                 return 0;
3617         }
3618         case PACKET_AUXDATA:
3619         {
3620                 int val;
3621
3622                 if (optlen < sizeof(val))
3623                         return -EINVAL;
3624                 if (copy_from_user(&val, optval, sizeof(val)))
3625                         return -EFAULT;
3626
3627                 po->auxdata = !!val;
3628                 return 0;
3629         }
3630         case PACKET_ORIGDEV:
3631         {
3632                 int val;
3633
3634                 if (optlen < sizeof(val))
3635                         return -EINVAL;
3636                 if (copy_from_user(&val, optval, sizeof(val)))
3637                         return -EFAULT;
3638
3639                 po->origdev = !!val;
3640                 return 0;
3641         }
3642         case PACKET_VNET_HDR:
3643         {
3644                 int val;
3645
3646                 if (sock->type != SOCK_RAW)
3647                         return -EINVAL;
3648                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3649                         return -EBUSY;
3650                 if (optlen < sizeof(val))
3651                         return -EINVAL;
3652                 if (copy_from_user(&val, optval, sizeof(val)))
3653                         return -EFAULT;
3654
3655                 po->has_vnet_hdr = !!val;
3656                 return 0;
3657         }
3658         case PACKET_TIMESTAMP:
3659         {
3660                 int val;
3661
3662                 if (optlen != sizeof(val))
3663                         return -EINVAL;
3664                 if (copy_from_user(&val, optval, sizeof(val)))
3665                         return -EFAULT;
3666
3667                 po->tp_tstamp = val;
3668                 return 0;
3669         }
3670         case PACKET_FANOUT:
3671         {
3672                 int val;
3673
3674                 if (optlen != sizeof(val))
3675                         return -EINVAL;
3676                 if (copy_from_user(&val, optval, sizeof(val)))
3677                         return -EFAULT;
3678
3679                 return fanout_add(sk, val & 0xffff, val >> 16);
3680         }
3681         case PACKET_FANOUT_DATA:
3682         {
3683                 if (!po->fanout)
3684                         return -EINVAL;
3685
3686                 return fanout_set_data(po, optval, optlen);
3687         }
3688         case PACKET_TX_HAS_OFF:
3689         {
3690                 unsigned int val;
3691
3692                 if (optlen != sizeof(val))
3693                         return -EINVAL;
3694                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3695                         return -EBUSY;
3696                 if (copy_from_user(&val, optval, sizeof(val)))
3697                         return -EFAULT;
3698                 po->tp_tx_has_off = !!val;
3699                 return 0;
3700         }
3701         case PACKET_QDISC_BYPASS:
3702         {
3703                 int val;
3704
3705                 if (optlen != sizeof(val))
3706                         return -EINVAL;
3707                 if (copy_from_user(&val, optval, sizeof(val)))
3708                         return -EFAULT;
3709
3710                 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3711                 return 0;
3712         }
3713         default:
3714                 return -ENOPROTOOPT;
3715         }
3716 }
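/*
 *	Userspace sketch (not kernel code): a typical option sequence for a
 *	TPACKET_V2 RX ring. PACKET_VERSION must come first: once a ring
 *	exists, the version switch above returns -EBUSY. The sizes are
 *	illustrative and assume 4 KiB pages.
 *
 *		#include <linux/if_packet.h>
 *		#include <sys/socket.h>
 *
 *		int ver = TPACKET_V2;
 *		struct tpacket_req req = {
 *			.tp_block_size = 4096,
 *			.tp_block_nr   = 64,
 *			.tp_frame_size = 2048,
 *			.tp_frame_nr   = 64 * (4096 / 2048),
 *		};
 *
 *		setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *		setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */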
3717
3718 static int packet_getsockopt(struct socket *sock, int level, int optname,
3719                              char __user *optval, int __user *optlen)
3720 {
3721         int len;
3722         int val, lv = sizeof(val);
3723         struct sock *sk = sock->sk;
3724         struct packet_sock *po = pkt_sk(sk);
3725         void *data = &val;
3726         union tpacket_stats_u st;
3727         struct tpacket_rollover_stats rstats;
3728
3729         if (level != SOL_PACKET)
3730                 return -ENOPROTOOPT;
3731
3732         if (get_user(len, optlen))
3733                 return -EFAULT;
3734
3735         if (len < 0)
3736                 return -EINVAL;
3737
3738         switch (optname) {
3739         case PACKET_STATISTICS:
3740                 spin_lock_bh(&sk->sk_receive_queue.lock);
3741                 memcpy(&st, &po->stats, sizeof(st));
3742                 memset(&po->stats, 0, sizeof(po->stats));
3743                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3744
3745                 if (po->tp_version == TPACKET_V3) {
3746                         lv = sizeof(struct tpacket_stats_v3);
3747                         st.stats3.tp_packets += st.stats3.tp_drops;
3748                         data = &st.stats3;
3749                 } else {
3750                         lv = sizeof(struct tpacket_stats);
3751                         st.stats1.tp_packets += st.stats1.tp_drops;
3752                         data = &st.stats1;
3753                 }
3754
3755                 break;
3756         case PACKET_AUXDATA:
3757                 val = po->auxdata;
3758                 break;
3759         case PACKET_ORIGDEV:
3760                 val = po->origdev;
3761                 break;
3762         case PACKET_VNET_HDR:
3763                 val = po->has_vnet_hdr;
3764                 break;
3765         case PACKET_VERSION:
3766                 val = po->tp_version;
3767                 break;
3768         case PACKET_HDRLEN:
3769                 if (len > sizeof(int))
3770                         len = sizeof(int);
3771                 val = 0;        /* len may be shorter than sizeof(int) */
3772                 if (copy_from_user(&val, optval, len))
3773                         return -EFAULT;
3773                 switch (val) {
3774                 case TPACKET_V1:
3775                         val = sizeof(struct tpacket_hdr);
3776                         break;
3777                 case TPACKET_V2:
3778                         val = sizeof(struct tpacket2_hdr);
3779                         break;
3780                 case TPACKET_V3:
3781                         val = sizeof(struct tpacket3_hdr);
3782                         break;
3783                 default:
3784                         return -EINVAL;
3785                 }
3786                 break;
3787         case PACKET_RESERVE:
3788                 val = po->tp_reserve;
3789                 break;
3790         case PACKET_LOSS:
3791                 val = po->tp_loss;
3792                 break;
3793         case PACKET_TIMESTAMP:
3794                 val = po->tp_tstamp;
3795                 break;
3796         case PACKET_FANOUT:
3797                 val = (po->fanout ?
3798                        ((u32)po->fanout->id |
3799                         ((u32)po->fanout->type << 16) |
3800                         ((u32)po->fanout->flags << 24)) :
3801                        0);
3802                 break;
3803         case PACKET_ROLLOVER_STATS:
3804                 if (!po->rollover)
3805                         return -EINVAL;
3806                 rstats.tp_all = atomic_long_read(&po->rollover->num);
3807                 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3808                 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3809                 data = &rstats;
3810                 lv = sizeof(rstats);
3811                 break;
3812         case PACKET_TX_HAS_OFF:
3813                 val = po->tp_tx_has_off;
3814                 break;
3815         case PACKET_QDISC_BYPASS:
3816                 val = packet_use_direct_xmit(po);
3817                 break;
3818         default:
3819                 return -ENOPROTOOPT;
3820         }
3821
3822         if (len > lv)
3823                 len = lv;
3824         if (put_user(len, optlen))
3825                 return -EFAULT;
3826         if (copy_to_user(optval, data, len))
3827                 return -EFAULT;
3828         return 0;
3829 }
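/*
 *	Userspace sketch (not kernel code): polling PACKET_STATISTICS. The
 *	counters are zeroed on every read, so each call returns the delta
 *	since the previous one, and tp_packets includes tp_drops, as added
 *	above.
 *
 *		#include <linux/if_packet.h>
 *		#include <stdio.h>
 *		#include <sys/socket.h>
 *
 *		struct tpacket_stats st;
 *		socklen_t len = sizeof(st);
 *
 *		if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS,
 *			       &st, &len) == 0)
 *			printf("%u packets, %u drops\n",
 *			       st.tp_packets, st.tp_drops);
 */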
3830
3831
3832 static int packet_notifier(struct notifier_block *this,
3833                            unsigned long msg, void *ptr)
3834 {
3835         struct sock *sk;
3836         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3837         struct net *net = dev_net(dev);
3838
3839         rcu_read_lock();
3840         sk_for_each_rcu(sk, &net->packet.sklist) {
3841                 struct packet_sock *po = pkt_sk(sk);
3842
3843                 switch (msg) {
3844                 case NETDEV_UNREGISTER:
3845                         if (po->mclist)
3846                                 packet_dev_mclist_delete(dev, &po->mclist);
3847                         /* fallthrough */
3848
3849                 case NETDEV_DOWN:
3850                         if (dev->ifindex == po->ifindex) {
3851                                 spin_lock(&po->bind_lock);
3852                                 if (po->running) {
3853                                         __unregister_prot_hook(sk, false);
3854                                         sk->sk_err = ENETDOWN;
3855                                         if (!sock_flag(sk, SOCK_DEAD))
3856                                                 sk->sk_error_report(sk);
3857                                 }
3858                                 if (msg == NETDEV_UNREGISTER) {
3859                                         packet_cached_dev_reset(po);
3860                                         po->ifindex = -1;
3861                                         if (po->prot_hook.dev)
3862                                                 dev_put(po->prot_hook.dev);
3863                                         po->prot_hook.dev = NULL;
3864                                 }
3865                                 spin_unlock(&po->bind_lock);
3866                         }
3867                         break;
3868                 case NETDEV_UP:
3869                         if (dev->ifindex == po->ifindex) {
3870                                 spin_lock(&po->bind_lock);
3871                                 if (po->num)
3872                                         register_prot_hook(sk);
3873                                 spin_unlock(&po->bind_lock);
3874                         }
3875                         break;
3876                 }
3877         }
3878         rcu_read_unlock();
3879         return NOTIFY_DONE;
3880 }
3881
3882
3883 static int packet_ioctl(struct socket *sock, unsigned int cmd,
3884                         unsigned long arg)
3885 {
3886         struct sock *sk = sock->sk;
3887
3888         switch (cmd) {
3889         case SIOCOUTQ:
3890         {
3891                 int amount = sk_wmem_alloc_get(sk);
3892
3893                 return put_user(amount, (int __user *)arg);
3894         }
3895         case SIOCINQ:
3896         {
3897                 struct sk_buff *skb;
3898                 int amount = 0;
3899
3900                 spin_lock_bh(&sk->sk_receive_queue.lock);
3901                 skb = skb_peek(&sk->sk_receive_queue);
3902                 if (skb)
3903                         amount = skb->len;
3904                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3905                 return put_user(amount, (int __user *)arg);
3906         }
3907         case SIOCGSTAMP:
3908                 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3909         case SIOCGSTAMPNS:
3910                 return sock_get_timestampns(sk, (struct timespec __user *)arg);
3911
3912 #ifdef CONFIG_INET
3913         case SIOCADDRT:
3914         case SIOCDELRT:
3915         case SIOCDARP:
3916         case SIOCGARP:
3917         case SIOCSARP:
3918         case SIOCGIFADDR:
3919         case SIOCSIFADDR:
3920         case SIOCGIFBRDADDR:
3921         case SIOCSIFBRDADDR:
3922         case SIOCGIFNETMASK:
3923         case SIOCSIFNETMASK:
3924         case SIOCGIFDSTADDR:
3925         case SIOCSIFDSTADDR:
3926         case SIOCSIFFLAGS:
3927                 return inet_dgram_ops.ioctl(sock, cmd, arg);
3928 #endif
3929
3930         default:
3931                 return -ENOIOCTLCMD;
3932         }
3933         return 0;
3934 }
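/*
 *	Userspace sketch (not kernel code): note that SIOCINQ on a packet
 *	socket reports the length of the *next* queued frame, not the total
 *	number of queued bytes, as the skb_peek() above shows.
 *
 *		#include <linux/sockios.h>
 *		#include <sys/ioctl.h>
 *
 *		int next_len = 0;
 *
 *		if (ioctl(fd, SIOCINQ, &next_len) == 0 && next_len > 0)
 *			; // a frame of next_len bytes is ready to read
 */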
3935
3936 static unsigned int packet_poll(struct file *file, struct socket *sock,
3937                                 poll_table *wait)
3938 {
3939         struct sock *sk = sock->sk;
3940         struct packet_sock *po = pkt_sk(sk);
3941         unsigned int mask = datagram_poll(file, sock, wait);
3942
3943         spin_lock_bh(&sk->sk_receive_queue.lock);
3944         if (po->rx_ring.pg_vec) {
3945                 if (!packet_previous_rx_frame(po, &po->rx_ring,
3946                         TP_STATUS_KERNEL))
3947                         mask |= POLLIN | POLLRDNORM;
3948         }
3949         if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
3950                 po->pressure = 0;
3951         spin_unlock_bh(&sk->sk_receive_queue.lock);
3952         spin_lock_bh(&sk->sk_write_queue.lock);
3953         if (po->tx_ring.pg_vec) {
3954                 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3955                         mask |= POLLOUT | POLLWRNORM;
3956         }
3957         spin_unlock_bh(&sk->sk_write_queue.lock);
3958         return mask;
3959 }
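/*
 *	Userspace sketch (not kernel code): blocking until the mapped RX
 *	ring has a frame. POLLIN here means at least one frame has left
 *	TP_STATUS_KERNEL, per the rx_ring check above; the caller then walks
 *	the frames whose tp_status has TP_STATUS_USER set.
 *
 *		#include <poll.h>
 *
 *		struct pollfd pfd = { fd, POLLIN | POLLRDNORM, 0 };
 *
 *		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *			; // consume ring frames here
 */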
3960
3961
3962 /* Dirty? Well, I have yet to find a better way to account
3963  * for user mmaps.
3964  */
3965
3966 static void packet_mm_open(struct vm_area_struct *vma)
3967 {
3968         struct file *file = vma->vm_file;
3969         struct socket *sock = file->private_data;
3970         struct sock *sk = sock->sk;
3971
3972         if (sk)
3973                 atomic_inc(&pkt_sk(sk)->mapped);
3974 }
3975
3976 static void packet_mm_close(struct vm_area_struct *vma)
3977 {
3978         struct file *file = vma->vm_file;
3979         struct socket *sock = file->private_data;
3980         struct sock *sk = sock->sk;
3981
3982         if (sk)
3983                 atomic_dec(&pkt_sk(sk)->mapped);
3984 }
3985
3986 static const struct vm_operations_struct packet_mmap_ops = {
3987         .open   =       packet_mm_open,
3988         .close  =       packet_mm_close,
3989 };
3990
3991 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3992                         unsigned int len)
3993 {
3994         int i;
3995
3996         for (i = 0; i < len; i++) {
3997                 if (likely(pg_vec[i].buffer)) {
3998                         if (is_vmalloc_addr(pg_vec[i].buffer))
3999                                 vfree(pg_vec[i].buffer);
4000                         else
4001                                 free_pages((unsigned long)pg_vec[i].buffer,
4002                                            order);
4003                         pg_vec[i].buffer = NULL;
4004                 }
4005         }
4006         kfree(pg_vec);
4007 }
4008
4009 static char *alloc_one_pg_vec_page(unsigned long order)
4010 {
4011         char *buffer;
4012         gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4013                           __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4014
4015         buffer = (char *) __get_free_pages(gfp_flags, order);
4016         if (buffer)
4017                 return buffer;
4018
4019         /* __get_free_pages failed, fall back to vmalloc */
4020         buffer = vzalloc((1 << order) * PAGE_SIZE);
4021         if (buffer)
4022                 return buffer;
4023
4024         /* vmalloc failed too; drop __GFP_NORETRY and let reclaim dig into swap */
4025         gfp_flags &= ~__GFP_NORETRY;
4026         buffer = (char *) __get_free_pages(gfp_flags, order);
4027         if (buffer)
4028                 return buffer;
4029
4030         /* complete and utter failure */
4031         return NULL;
4032 }
4033
4034 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4035 {
4036         unsigned int block_nr = req->tp_block_nr;
4037         struct pgv *pg_vec;
4038         int i;
4039
4040         pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4041         if (unlikely(!pg_vec))
4042                 goto out;
4043
4044         for (i = 0; i < block_nr; i++) {
4045                 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4046                 if (unlikely(!pg_vec[i].buffer))
4047                         goto out_free_pgvec;
4048         }
4049
4050 out:
4051         return pg_vec;
4052
4053 out_free_pgvec:
4054         free_pg_vec(pg_vec, order, block_nr);
4055         pg_vec = NULL;
4056         goto out;
4057 }
4058
4059 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4060                 int closing, int tx_ring)
4061 {
4062         struct pgv *pg_vec = NULL;
4063         struct packet_sock *po = pkt_sk(sk);
4064         int was_running, order = 0;
4065         struct packet_ring_buffer *rb;
4066         struct sk_buff_head *rb_queue;
4067         __be16 num;
4068         int err = -EINVAL;
4069         /* Local alias to keep the code churn minimal */
4070         struct tpacket_req *req = &req_u->req;
4071
4072         /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
4073         if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
4074                 WARN(1, "Tx-ring is not supported.\n");
4075                 goto out;
4076         }
4077
4078         rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4079         rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4080
4081         err = -EBUSY;
4082         if (!closing) {
4083                 if (atomic_read(&po->mapped))
4084                         goto out;
4085                 if (packet_read_pending(rb))
4086                         goto out;
4087         }
4088
4089         if (req->tp_block_nr) {
4090                 /* Sanity tests and some calculations */
4091                 err = -EBUSY;
4092                 if (unlikely(rb->pg_vec))
4093                         goto out;
4094
4095                 switch (po->tp_version) {
4096                 case TPACKET_V1:
4097                         po->tp_hdrlen = TPACKET_HDRLEN;
4098                         break;
4099                 case TPACKET_V2:
4100                         po->tp_hdrlen = TPACKET2_HDRLEN;
4101                         break;
4102                 case TPACKET_V3:
4103                         po->tp_hdrlen = TPACKET3_HDRLEN;
4104                         break;
4105                 }
4106
4107                 err = -EINVAL;
4108                 if (unlikely((int)req->tp_block_size <= 0))
4109                         goto out;
4110                 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
4111                         goto out;
4112                 if (po->tp_version >= TPACKET_V3 &&
4113                     req->tp_block_size <=
4114                           BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
4115                         goto out;
4116                 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4117                                         po->tp_reserve))
4118                         goto out;
4119                 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4120                         goto out;
4121
4122                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4123                 if (unlikely(rb->frames_per_block == 0))
4124                         goto out;
4125                 if (unlikely(req->tp_block_size >
4126                              UINT_MAX / req->tp_block_nr))
4127                         goto out;
4128                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4129                                         req->tp_frame_nr))
4130                         goto out;
4128
4129                 err = -ENOMEM;
4130                 order = get_order(req->tp_block_size);
4131                 pg_vec = alloc_pg_vec(req, order);
4132                 if (unlikely(!pg_vec))
4133                         goto out;
4134                 switch (po->tp_version) {
4135                 case TPACKET_V3:
4136                 /* The transmit path is not supported for V3. We
4137                  * checked that above, but stay paranoid here.
4138                  */
4139                         if (!tx_ring)
4140                                 init_prb_bdqc(po, rb, pg_vec, req_u);
4141                         break;
4142                 default:
4143                         break;
4144                 }
4145         } else {
4146                 /* Done: no blocks requested, so tear the ring down */
4148                 err = -EINVAL;
4149                 if (unlikely(req->tp_frame_nr))
4150                         goto out;
4151         }
4152
4153         lock_sock(sk);
4154
4155         /* Detach socket from network */
4156         spin_lock(&po->bind_lock);
4157         was_running = po->running;
4158         num = po->num;
4159         if (was_running) {
4160                 po->num = 0;
4161                 __unregister_prot_hook(sk, false);
4162         }
4163         spin_unlock(&po->bind_lock);
4164
4165         synchronize_net();
4166
4167         err = -EBUSY;
4168         mutex_lock(&po->pg_vec_lock);
4169         if (closing || atomic_read(&po->mapped) == 0) {
4170                 err = 0;
4171                 spin_lock_bh(&rb_queue->lock);
4172                 swap(rb->pg_vec, pg_vec);
4173                 rb->frame_max = (req->tp_frame_nr - 1);
4174                 rb->head = 0;
4175                 rb->frame_size = req->tp_frame_size;
4176                 spin_unlock_bh(&rb_queue->lock);
4177
4178                 swap(rb->pg_vec_order, order);
4179                 swap(rb->pg_vec_len, req->tp_block_nr);
4180
4181                 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4182                 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4183                                                 tpacket_rcv : packet_rcv;
4184                 skb_queue_purge(rb_queue);
4185                 if (atomic_read(&po->mapped))
4186                         pr_err("packet_mmap: vma is busy: %d\n",
4187                                atomic_read(&po->mapped));
4188         }
4189         mutex_unlock(&po->pg_vec_lock);
4190
4191         spin_lock(&po->bind_lock);
4192         if (was_running) {
4193                 po->num = num;
4194                 register_prot_hook(sk);
4195         }
4196         spin_unlock(&po->bind_lock);
4197         if (closing && (po->tp_version > TPACKET_V2)) {
4198                 /* Because we don't support block-based V3 on tx-ring */
4199                 if (!tx_ring)
4200                         prb_shutdown_retire_blk_timer(po, rb_queue);
4201         }
4202         release_sock(sk);
4203
4204         if (pg_vec)
4205                 free_pg_vec(pg_vec, order, req->tp_block_nr);
4206 out:
4207         return err;
4208 }
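/*
 *	Userspace sketch (not kernel code): a TPACKET_V3 RX-ring request
 *	satisfying the sanity checks above, assuming 4 KiB pages:
 *	tp_block_size is a multiple of PAGE_SIZE, tp_frame_size is
 *	TPACKET_ALIGNMENT-aligned, and tp_frame_nr is exactly
 *	frames_per_block * tp_block_nr. A TX ring would be rejected for V3,
 *	per the WARN above.
 *
 *		struct tpacket_req3 req3 = {
 *			.tp_block_size     = 1 << 22,
 *			.tp_block_nr       = 64,
 *			.tp_frame_size     = 2048,
 *			.tp_frame_nr       = ((1 << 22) / 2048) * 64,
 *			.tp_retire_blk_tov = 60,	// block timeout, ms
 *		};
 */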
4209
4210 static int packet_mmap(struct file *file, struct socket *sock,
4211                 struct vm_area_struct *vma)
4212 {
4213         struct sock *sk = sock->sk;
4214         struct packet_sock *po = pkt_sk(sk);
4215         unsigned long size, expected_size;
4216         struct packet_ring_buffer *rb;
4217         unsigned long start;
4218         int err = -EINVAL;
4219         int i;
4220
4221         if (vma->vm_pgoff)
4222                 return -EINVAL;
4223
4224         mutex_lock(&po->pg_vec_lock);
4225
4226         expected_size = 0;
4227         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4228                 if (rb->pg_vec) {
4229                         expected_size += rb->pg_vec_len
4230                                                 * rb->pg_vec_pages
4231                                                 * PAGE_SIZE;
4232                 }
4233         }
4234
4235         if (expected_size == 0)
4236                 goto out;
4237
4238         size = vma->vm_end - vma->vm_start;
4239         if (size != expected_size)
4240                 goto out;
4241
4242         start = vma->vm_start;
4243         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4244                 if (rb->pg_vec == NULL)
4245                         continue;
4246
4247                 for (i = 0; i < rb->pg_vec_len; i++) {
4248                         struct page *page;
4249                         void *kaddr = rb->pg_vec[i].buffer;
4250                         int pg_num;
4251
4252                         for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4253                                 page = pgv_to_page(kaddr);
4254                                 err = vm_insert_page(vma, start, page);
4255                                 if (unlikely(err))
4256                                         goto out;
4257                                 start += PAGE_SIZE;
4258                                 kaddr += PAGE_SIZE;
4259                         }
4260                 }
4261         }
4262
4263         atomic_inc(&po->mapped);
4264         vma->vm_ops = &packet_mmap_ops;
4265         err = 0;
4266
4267 out:
4268         mutex_unlock(&po->pg_vec_lock);
4269         return err;
4270 }
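/*
 *	Userspace sketch (not kernel code): mapping the ring(s) set up above.
 *	RX and TX share one contiguous mapping (RX first) and the requested
 *	length must equal the expected size exactly. A TPACKET_V2 layout and
 *	the earlier PACKET_RX_RING request "req" are assumptions here; frame
 *	0 sits at the start of block 0.
 *
 *		#include <linux/if_packet.h>
 *		#include <sys/mman.h>
 *
 *		size_t len = (size_t)req.tp_block_size * req.tp_block_nr;
 *		void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				  MAP_SHARED, fd, 0);
 *		struct tpacket2_hdr *hdr = ring;
 *
 *		if (ring != MAP_FAILED &&
 *		    (hdr->tp_status & TP_STATUS_USER)) {
 *			// frame is ours; hand it back when done:
 *			hdr->tp_status = TP_STATUS_KERNEL;
 *		}
 */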
4271
4272 static const struct proto_ops packet_ops_spkt = {
4273         .family =       PF_PACKET,
4274         .owner =        THIS_MODULE,
4275         .release =      packet_release,
4276         .bind =         packet_bind_spkt,
4277         .connect =      sock_no_connect,
4278         .socketpair =   sock_no_socketpair,
4279         .accept =       sock_no_accept,
4280         .getname =      packet_getname_spkt,
4281         .poll =         datagram_poll,
4282         .ioctl =        packet_ioctl,
4283         .listen =       sock_no_listen,
4284         .shutdown =     sock_no_shutdown,
4285         .setsockopt =   sock_no_setsockopt,
4286         .getsockopt =   sock_no_getsockopt,
4287         .sendmsg =      packet_sendmsg_spkt,
4288         .recvmsg =      packet_recvmsg,
4289         .mmap =         sock_no_mmap,
4290         .sendpage =     sock_no_sendpage,
4291 };
4292
4293 static const struct proto_ops packet_ops = {
4294         .family =       PF_PACKET,
4295         .owner =        THIS_MODULE,
4296         .release =      packet_release,
4297         .bind =         packet_bind,
4298         .connect =      sock_no_connect,
4299         .socketpair =   sock_no_socketpair,
4300         .accept =       sock_no_accept,
4301         .getname =      packet_getname,
4302         .poll =         packet_poll,
4303         .ioctl =        packet_ioctl,
4304         .listen =       sock_no_listen,
4305         .shutdown =     sock_no_shutdown,
4306         .setsockopt =   packet_setsockopt,
4307         .getsockopt =   packet_getsockopt,
4308         .sendmsg =      packet_sendmsg,
4309         .recvmsg =      packet_recvmsg,
4310         .mmap =         packet_mmap,
4311         .sendpage =     sock_no_sendpage,
4312 };
4313
4314 static const struct net_proto_family packet_family_ops = {
4315         .family =       PF_PACKET,
4316         .create =       packet_create,
4317         .owner  =       THIS_MODULE,
4318 };
4319
4320 static struct notifier_block packet_netdev_notifier = {
4321         .notifier_call =        packet_notifier,
4322 };
4323
4324 #ifdef CONFIG_PROC_FS
4325
4326 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4327         __acquires(RCU)
4328 {
4329         struct net *net = seq_file_net(seq);
4330
4331         rcu_read_lock();
4332         return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4333 }
4334
4335 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4336 {
4337         struct net *net = seq_file_net(seq);
4338         return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4339 }
4340
4341 static void packet_seq_stop(struct seq_file *seq, void *v)
4342         __releases(RCU)
4343 {
4344         rcu_read_unlock();
4345 }
4346
4347 static int packet_seq_show(struct seq_file *seq, void *v)
4348 {
4349         if (v == SEQ_START_TOKEN)
4350                 seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4351         else {
4352                 struct sock *s = sk_entry(v);
4353                 const struct packet_sock *po = pkt_sk(s);
4354
4355                 seq_printf(seq,
4356                            "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4357                            s,
4358                            atomic_read(&s->sk_refcnt),
4359                            s->sk_type,
4360                            ntohs(po->num),
4361                            po->ifindex,
4362                            po->running,
4363                            atomic_read(&s->sk_rmem_alloc),
4364                            from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4365                            sock_i_ino(s));
4366         }
4367
4368         return 0;
4369 }
4370
4371 static const struct seq_operations packet_seq_ops = {
4372         .start  = packet_seq_start,
4373         .next   = packet_seq_next,
4374         .stop   = packet_seq_stop,
4375         .show   = packet_seq_show,
4376 };
4377
4378 static int packet_seq_open(struct inode *inode, struct file *file)
4379 {
4380         return seq_open_net(inode, file, &packet_seq_ops,
4381                             sizeof(struct seq_net_private));
4382 }
4383
4384 static const struct file_operations packet_seq_fops = {
4385         .owner          = THIS_MODULE,
4386         .open           = packet_seq_open,
4387         .read           = seq_read,
4388         .llseek         = seq_lseek,
4389         .release        = seq_release_net,
4390 };
4391
4392 #endif
4393
4394 static int __net_init packet_net_init(struct net *net)
4395 {
4396         mutex_init(&net->packet.sklist_lock);
4397         INIT_HLIST_HEAD(&net->packet.sklist);
4398
4399         if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4400                 return -ENOMEM;
4401
4402         return 0;
4403 }
4404
4405 static void __net_exit packet_net_exit(struct net *net)
4406 {
4407         remove_proc_entry("packet", net->proc_net);
4408 }
4409
4410 static struct pernet_operations packet_net_ops = {
4411         .init = packet_net_init,
4412         .exit = packet_net_exit,
4413 };
4414
4415
4416 static void __exit packet_exit(void)
4417 {
4418         unregister_netdevice_notifier(&packet_netdev_notifier);
4419         unregister_pernet_subsys(&packet_net_ops);
4420         sock_unregister(PF_PACKET);
4421         proto_unregister(&packet_proto);
4422 }
4423
4424 static int __init packet_init(void)
4425 {
4426         int rc = proto_register(&packet_proto, 0);
4427
4428         if (rc != 0)
4429                 goto out;
4430
4431         sock_register(&packet_family_ops);
4432         register_pernet_subsys(&packet_net_ops);
4433         register_netdevice_notifier(&packet_netdev_notifier);
4434 out:
4435         return rc;
4436 }
4437
4438 module_init(packet_init);
4439 module_exit(packet_exit);
4440 MODULE_LICENSE("GPL");
4441 MODULE_ALIAS_NETPROTO(PF_PACKET);