tun: fix tun_chr_aio_read so that aio works
drivers/net/tun.c
1 /*
2  *  TUN - Universal TUN/TAP device driver.
3  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
4  *
5  *  This program is free software; you can redistribute it and/or modify
6  *  it under the terms of the GNU General Public License as published by
7  *  the Free Software Foundation; either version 2 of the License, or
8  *  (at your option) any later version.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  *  GNU General Public License for more details.
14  *
15  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
16  */
17
18 /*
19  *  Changes:
20  *
21  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22  *    Add TUNSETLINK ioctl to set the link encapsulation
23  *
24  *  Mark Smith <markzzzsmith@yahoo.com.au>
25  *    Use random_ether_addr() for tap MAC address.
26  *
27  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
28  *    Fixes in packet dropping, queue length setting and queue wakeup.
29  *    Increased default tx queue length.
30  *    Added ethtool API.
31  *    Minor cleanups
32  *
33  *  Daniel Podlejski <underley@underley.eu.org>
34  *    Modifications for 2.3.99-pre5 kernel.
35  */
36
37 #define DRV_NAME        "tun"
38 #define DRV_VERSION     "1.6"
39 #define DRV_DESCRIPTION "Universal TUN/TAP device driver"
40 #define DRV_COPYRIGHT   "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
41
42 #include <linux/module.h>
43 #include <linux/errno.h>
44 #include <linux/kernel.h>
45 #include <linux/major.h>
46 #include <linux/slab.h>
47 #include <linux/smp_lock.h>
48 #include <linux/poll.h>
49 #include <linux/fcntl.h>
50 #include <linux/init.h>
51 #include <linux/skbuff.h>
52 #include <linux/netdevice.h>
53 #include <linux/etherdevice.h>
54 #include <linux/miscdevice.h>
55 #include <linux/ethtool.h>
56 #include <linux/rtnetlink.h>
57 #include <linux/if.h>
58 #include <linux/if_arp.h>
59 #include <linux/if_ether.h>
60 #include <linux/if_tun.h>
61 #include <linux/crc32.h>
62 #include <linux/nsproxy.h>
63 #include <linux/virtio_net.h>
64 #include <net/net_namespace.h>
65 #include <net/netns/generic.h>
66 #include <net/rtnetlink.h>
67 #include <net/sock.h>
68
69 #include <asm/system.h>
70 #include <asm/uaccess.h>
71
72 /* Uncomment to enable debugging */
73 /* #define TUN_DEBUG 1 */
74
75 #ifdef TUN_DEBUG
76 static int debug;
77
78 #define DBG  if(tun->debug)printk
79 #define DBG1 if(debug==2)printk
80 #else
81 #define DBG( a... )
82 #define DBG1( a... )
83 #endif
84
85 #define FLT_EXACT_COUNT 8
86 struct tap_filter {
87         unsigned int    count;    /* Number of addrs. Zero means disabled */
88         u32             mask[2];  /* Mask of the hashed addrs */
89         unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
90 };
91
92 struct tun_file {
93         atomic_t count;
94         struct tun_struct *tun;
95         struct net *net;
96 };
97
98 struct tun_sock;
99
100 struct tun_struct {
101         struct tun_file         *tfile;
102         unsigned int            flags;
103         uid_t                   owner;
104         gid_t                   group;
105
106         struct sk_buff_head     readq;
107
108         struct net_device       *dev;
109         struct fasync_struct    *fasync;
110
111         struct tap_filter       txflt;
112         struct sock             *sk;
113         struct socket           socket;
114
115 #ifdef TUN_DEBUG
116         int debug;
117 #endif
118 };
119
120 struct tun_sock {
121         struct sock             sk;
122         struct tun_struct       *tun;
123 };
124
125 static inline struct tun_sock *tun_sk(struct sock *sk)
126 {
127         return container_of(sk, struct tun_sock, sk);
128 }
129
130 static int tun_attach(struct tun_struct *tun, struct file *file)
131 {
132         struct tun_file *tfile = file->private_data;
133         const struct cred *cred = current_cred();
134         int err;
135
136         ASSERT_RTNL();
137
138         /* Check permissions */
139         if (((tun->owner != -1 && cred->euid != tun->owner) ||
140              (tun->group != -1 && !in_egroup_p(tun->group))) &&
141                 !capable(CAP_NET_ADMIN))
142                 return -EPERM;
143
144         netif_tx_lock_bh(tun->dev);
145
146         err = -EINVAL;
147         if (tfile->tun)
148                 goto out;
149
150         err = -EBUSY;
151         if (tun->tfile)
152                 goto out;
153
154         err = 0;
155         tfile->tun = tun;
156         tun->tfile = tfile;
157         dev_hold(tun->dev);
158         sock_hold(tun->sk);
159         atomic_inc(&tfile->count);
160
161 out:
162         netif_tx_unlock_bh(tun->dev);
163         return err;
164 }
165
166 static void __tun_detach(struct tun_struct *tun)
167 {
168         /* Detach from net device */
169         netif_tx_lock_bh(tun->dev);
170         tun->tfile = NULL;
171         netif_tx_unlock_bh(tun->dev);
172
173         /* Drop read queue */
174         skb_queue_purge(&tun->readq);
175
176         /* Drop the extra count on the net device */
177         dev_put(tun->dev);
178 }
179
180 static void tun_detach(struct tun_struct *tun)
181 {
182         rtnl_lock();
183         __tun_detach(tun);
184         rtnl_unlock();
185 }
186
187 static struct tun_struct *__tun_get(struct tun_file *tfile)
188 {
189         struct tun_struct *tun = NULL;
190
191         if (atomic_inc_not_zero(&tfile->count))
192                 tun = tfile->tun;
193
194         return tun;
195 }
196
197 static struct tun_struct *tun_get(struct file *file)
198 {
199         return __tun_get(file->private_data);
200 }
201
202 static void tun_put(struct tun_struct *tun)
203 {
204         struct tun_file *tfile = tun->tfile;
205
206         if (atomic_dec_and_test(&tfile->count))
207                 tun_detach(tfile->tun);
208 }
209
210 /* TAP filtering */
211 static void addr_hash_set(u32 *mask, const u8 *addr)
212 {
213         int n = ether_crc(ETH_ALEN, addr) >> 26;
214         mask[n >> 5] |= (1 << (n & 31));
215 }
216
217 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
218 {
219         int n = ether_crc(ETH_ALEN, addr) >> 26;
220         return mask[n >> 5] & (1 << (n & 31));
221 }
222
223 static int update_filter(struct tap_filter *filter, void __user *arg)
224 {
225         struct { u8 u[ETH_ALEN]; } *addr;
226         struct tun_filter uf;
227         int err, alen, n, nexact;
228
229         if (copy_from_user(&uf, arg, sizeof(uf)))
230                 return -EFAULT;
231
232         if (!uf.count) {
233                 /* Disabled */
234                 filter->count = 0;
235                 return 0;
236         }
237
238         alen = ETH_ALEN * uf.count;
239         addr = kmalloc(alen, GFP_KERNEL);
240         if (!addr)
241                 return -ENOMEM;
242
243         if (copy_from_user(addr, arg + sizeof(uf), alen)) {
244                 err = -EFAULT;
245                 goto done;
246         }
247
248         /* The filter is updated without holding any locks, which is
249          * perfectly safe: we disable it first, and in the worst
250          * case we'll accept a few undesired packets. */
251         filter->count = 0;
252         wmb();
253
254         /* Use first set of addresses as an exact filter */
255         for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
256                 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
257
258         nexact = n;
259
260         /* Remaining multicast addresses are hashed; a remaining
261          * unicast address leaves the filter disabled. */
262         memset(filter->mask, 0, sizeof(filter->mask));
263         for (; n < uf.count; n++) {
264                 if (!is_multicast_ether_addr(addr[n].u)) {
265                         err = 0; /* no filter */
266                         goto done;
267                 }
268                 addr_hash_set(filter->mask, addr[n].u);
269         }
270
271         /* For ALLMULTI just set the mask to all ones.
272          * This overrides the mask populated above. */
273         if ((uf.flags & TUN_FLT_ALLMULTI))
274                 memset(filter->mask, ~0, sizeof(filter->mask));
275
276         /* Now enable the filter */
277         wmb();
278         filter->count = nexact;
279
280         /* Return the number of exact filters */
281         err = nexact;
282
283 done:
284         kfree(addr);
285         return err;
286 }
287
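[Editor's note: update_filter() above expects the user buffer to hold a struct tun_filter header followed immediately by uf.count six-byte MAC addresses. The following is a minimal user-space sketch of programming one exact address through TUNSETTXFILTER; the helper name set_one_filter() and the already-attached tap_fd are illustrative assumptions, not part of this driver.]

#include <string.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

/* Hypothetical helper: program one exact unicast address into the tx filter
 * of a tap fd that is already attached via TUNSETIFF.  The kernel reads a
 * struct tun_filter immediately followed by uf.count MAC addresses. */
static int set_one_filter(int tap_fd, const unsigned char mac[ETH_ALEN])
{
	unsigned char buf[sizeof(struct tun_filter) + ETH_ALEN];
	struct tun_filter *uf = (struct tun_filter *)buf;

	memset(buf, 0, sizeof(buf));
	uf->flags = 0;		/* or TUN_FLT_ALLMULTI to accept all multicast */
	uf->count = 1;
	memcpy(buf + sizeof(*uf), mac, ETH_ALEN);

	/* update_filter() returns the number of exact filters on success. */
	return ioctl(tap_fd, TUNSETTXFILTER, uf);
}
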
288 /* Returns: 0 - drop, !=0 - accept */
289 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
290 {
291         /* Cannot use eth_hdr(skb) here because skb_mac_header() is incorrect
292          * at this point. */
293         struct ethhdr *eh = (struct ethhdr *) skb->data;
294         int i;
295
296         /* Exact match */
297         for (i = 0; i < filter->count; i++)
298                 if (!compare_ether_addr(eh->h_dest, filter->addr[i]))
299                         return 1;
300
301         /* Inexact match (multicast only) */
302         if (is_multicast_ether_addr(eh->h_dest))
303                 return addr_hash_test(filter->mask, eh->h_dest);
304
305         return 0;
306 }
307
308 /*
309  * Checks whether the packet is accepted or not.
310  * Returns: 0 - drop, !=0 - accept
311  */
312 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
313 {
314         if (!filter->count)
315                 return 1;
316
317         return run_filter(filter, skb);
318 }
319
320 /* Network device part of the driver */
321
322 static const struct ethtool_ops tun_ethtool_ops;
323
324 /* Net device detach from fd. */
325 static void tun_net_uninit(struct net_device *dev)
326 {
327         struct tun_struct *tun = netdev_priv(dev);
328         struct tun_file *tfile = tun->tfile;
329
330         /* Inform the methods they need to stop using the dev.
331          */
332         if (tfile) {
333                 wake_up_all(&tun->socket.wait);
334                 if (atomic_dec_and_test(&tfile->count))
335                         __tun_detach(tun);
336         }
337 }
338
339 static void tun_free_netdev(struct net_device *dev)
340 {
341         struct tun_struct *tun = netdev_priv(dev);
342
343         sock_put(tun->sk);
344 }
345
346 /* Net device open. */
347 static int tun_net_open(struct net_device *dev)
348 {
349         netif_start_queue(dev);
350         return 0;
351 }
352
353 /* Net device close. */
354 static int tun_net_close(struct net_device *dev)
355 {
356         netif_stop_queue(dev);
357         return 0;
358 }
359
360 /* Net device start xmit */
361 static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
362 {
363         struct tun_struct *tun = netdev_priv(dev);
364
365         DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len);
366
367         /* Drop packet if interface is not attached */
368         if (!tun->tfile)
369                 goto drop;
370
371         /* Drop if the filter does not like it.
372          * This is a noop if the filter is disabled.
373          * Filter can be enabled only for the TAP devices. */
374         if (!check_filter(&tun->txflt, skb))
375                 goto drop;
376
377         if (skb_queue_len(&tun->readq) >= dev->tx_queue_len) {
378                 if (!(tun->flags & TUN_ONE_QUEUE)) {
379                         /* Normal queueing mode. */
380                         /* Packet scheduler handles dropping of further packets. */
381                         netif_stop_queue(dev);
382
383                         /* We won't see all dropped packets individually, so overrun
384                          * error is more appropriate. */
385                         dev->stats.tx_fifo_errors++;
386                 } else {
387                         /* Single queue mode.
388                          * Driver handles dropping of all packets itself. */
389                         goto drop;
390                 }
391         }
392
393         /* Enqueue packet */
394         skb_queue_tail(&tun->readq, skb);
395         dev->trans_start = jiffies;
396
397         /* Notify and wake up reader process */
398         if (tun->flags & TUN_FASYNC)
399                 kill_fasync(&tun->fasync, SIGIO, POLL_IN);
400         wake_up_interruptible(&tun->socket.wait);
401         return 0;
402
403 drop:
404         dev->stats.tx_dropped++;
405         kfree_skb(skb);
406         return 0;
407 }
408
409 static void tun_net_mclist(struct net_device *dev)
410 {
411         /*
412          * This callback is supposed to deal with mc filter in
413          * _rx_ path and has nothing to do with the _tx_ path.
414          * In rx path we always accept everything userspace gives us.
415          */
416         return;
417 }
418
419 #define MIN_MTU 68
420 #define MAX_MTU 65535
421
422 static int
423 tun_net_change_mtu(struct net_device *dev, int new_mtu)
424 {
425         if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
426                 return -EINVAL;
427         dev->mtu = new_mtu;
428         return 0;
429 }
430
431 static const struct net_device_ops tun_netdev_ops = {
432         .ndo_uninit             = tun_net_uninit,
433         .ndo_open               = tun_net_open,
434         .ndo_stop               = tun_net_close,
435         .ndo_start_xmit         = tun_net_xmit,
436         .ndo_change_mtu         = tun_net_change_mtu,
437 };
438
439 static const struct net_device_ops tap_netdev_ops = {
440         .ndo_uninit             = tun_net_uninit,
441         .ndo_open               = tun_net_open,
442         .ndo_stop               = tun_net_close,
443         .ndo_start_xmit         = tun_net_xmit,
444         .ndo_change_mtu         = tun_net_change_mtu,
445         .ndo_set_multicast_list = tun_net_mclist,
446         .ndo_set_mac_address    = eth_mac_addr,
447         .ndo_validate_addr      = eth_validate_addr,
448 };
449
450 /* Initialize net device. */
451 static void tun_net_init(struct net_device *dev)
452 {
453         struct tun_struct *tun = netdev_priv(dev);
454
455         switch (tun->flags & TUN_TYPE_MASK) {
456         case TUN_TUN_DEV:
457                 dev->netdev_ops = &tun_netdev_ops;
458
459                 /* Point-to-Point TUN Device */
460                 dev->hard_header_len = 0;
461                 dev->addr_len = 0;
462                 dev->mtu = 1500;
463
464                 /* Zero header length */
465                 dev->type = ARPHRD_NONE;
466                 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
467                 dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
468                 break;
469
470         case TUN_TAP_DEV:
471                 dev->netdev_ops = &tap_netdev_ops;
472                 /* Ethernet TAP Device */
473                 ether_setup(dev);
474
475                 random_ether_addr(dev->dev_addr);
476
477                 dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
478                 break;
479         }
480 }
481
482 /* Character device part */
483
484 /* Poll */
485 static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
486 {
487         struct tun_file *tfile = file->private_data;
488         struct tun_struct *tun = __tun_get(tfile);
489         struct sock *sk;
490         unsigned int mask = 0;
491
492         if (!tun)
493                 return POLLERR;
494         sk = tun->sk;
495         DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
496
497         poll_wait(file, &tun->socket.wait, wait);
498
499         if (!skb_queue_empty(&tun->readq))
500                 mask |= POLLIN | POLLRDNORM;
501
502         if (sock_writeable(sk) ||
503             (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
504              sock_writeable(sk)))
505                 mask |= POLLOUT | POLLWRNORM;
506
507         if (tun->dev->reg_state != NETREG_REGISTERED)
508                 mask = POLLERR;
509
510         tun_put(tun);
511         return mask;
512 }
513
514 /* prepad is the amount to reserve at front.  len is length after that.
515  * linear is a hint as to how much to copy (usually headers). */
516 static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
517                                             size_t prepad, size_t len,
518                                             size_t linear, int noblock)
519 {
520         struct sock *sk = tun->sk;
521         struct sk_buff *skb;
522         int err;
523
524         /* Under a page?  Don't bother with paged skb. */
525         if (prepad + len < PAGE_SIZE || !linear)
526                 linear = len;
527
528         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
529                                    &err);
530         if (!skb)
531                 return ERR_PTR(err);
532
533         skb_reserve(skb, prepad);
534         skb_put(skb, linear);
535         skb->data_len = len - linear;
536         skb->len += len - linear;
537
538         return skb;
539 }
540
541 /* Get packet from user space buffer */
542 static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
543                                        struct iovec *iv, size_t count,
544                                        int noblock)
545 {
546         struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
547         struct sk_buff *skb;
548         size_t len = count, align = 0;
549         struct virtio_net_hdr gso = { 0 };
550
551         if (!(tun->flags & TUN_NO_PI)) {
552                 if ((len -= sizeof(pi)) > count)
553                         return -EINVAL;
554
555                 if(memcpy_fromiovec((void *)&pi, iv, sizeof(pi)))
556                         return -EFAULT;
557         }
558
559         if (tun->flags & TUN_VNET_HDR) {
560                 if ((len -= sizeof(gso)) > count)
561                         return -EINVAL;
562
563                 if (memcpy_fromiovec((void *)&gso, iv, sizeof(gso)))
564                         return -EFAULT;
565
566                 if (gso.hdr_len > len)
567                         return -EINVAL;
568         }
569
570         if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
571                 align = NET_IP_ALIGN;
572                 if (unlikely(len < ETH_HLEN ||
573                              (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
574                         return -EINVAL;
575         }
576
577         skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock);
578         if (IS_ERR(skb)) {
579                 if (PTR_ERR(skb) != -EAGAIN)
580                         tun->dev->stats.rx_dropped++;
581                 return PTR_ERR(skb);
582         }
583
584         if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
585                 tun->dev->stats.rx_dropped++;
586                 kfree_skb(skb);
587                 return -EFAULT;
588         }
589
590         if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
591                 if (!skb_partial_csum_set(skb, gso.csum_start,
592                                           gso.csum_offset)) {
593                         tun->dev->stats.rx_frame_errors++;
594                         kfree_skb(skb);
595                         return -EINVAL;
596                 }
597         } else if (tun->flags & TUN_NOCHECKSUM)
598                 skb->ip_summed = CHECKSUM_UNNECESSARY;
599
600         switch (tun->flags & TUN_TYPE_MASK) {
601         case TUN_TUN_DEV:
602                 if (tun->flags & TUN_NO_PI) {
603                         switch (skb->data[0] & 0xf0) {
604                         case 0x40:
605                                 pi.proto = htons(ETH_P_IP);
606                                 break;
607                         case 0x60:
608                                 pi.proto = htons(ETH_P_IPV6);
609                                 break;
610                         default:
611                                 tun->dev->stats.rx_dropped++;
612                                 kfree_skb(skb);
613                                 return -EINVAL;
614                         }
615                 }
616
617                 skb_reset_mac_header(skb);
618                 skb->protocol = pi.proto;
619                 skb->dev = tun->dev;
620                 break;
621         case TUN_TAP_DEV:
622                 skb->protocol = eth_type_trans(skb, tun->dev);
623                 break;
624         };
625
626         if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
627                 pr_debug("GSO!\n");
628                 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
629                 case VIRTIO_NET_HDR_GSO_TCPV4:
630                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
631                         break;
632                 case VIRTIO_NET_HDR_GSO_TCPV6:
633                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
634                         break;
635                 default:
636                         tun->dev->stats.rx_frame_errors++;
637                         kfree_skb(skb);
638                         return -EINVAL;
639                 }
640
641                 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
642                         skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
643
644                 skb_shinfo(skb)->gso_size = gso.gso_size;
645                 if (skb_shinfo(skb)->gso_size == 0) {
646                         tun->dev->stats.rx_frame_errors++;
647                         kfree_skb(skb);
648                         return -EINVAL;
649                 }
650
651                 /* Header must be checked, and gso_segs computed. */
652                 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
653                 skb_shinfo(skb)->gso_segs = 0;
654         }
655
656         netif_rx_ni(skb);
657
658         tun->dev->stats.rx_packets++;
659         tun->dev->stats.rx_bytes += len;
660
661         return count;
662 }
663
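[Editor's note: tun_get_user() above defines the layout a writer must supply: a struct tun_pi unless IFF_NO_PI was requested, a struct virtio_net_hdr only with IFF_VNET_HDR, and then the frame itself. A minimal user-space sketch of the write side follows; the helper name send_ipv4_packet() and the pre-configured tun fd (no IFF_NO_PI, no IFF_VNET_HDR) are assumptions.]

#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

/* Hypothetical helper: write one IPv4 packet to a tun fd configured *without*
 * IFF_NO_PI, so a struct tun_pi must precede the payload.  With IFF_VNET_HDR
 * a struct virtio_net_hdr would follow the tun_pi as well. */
static ssize_t send_ipv4_packet(int tun_fd, const void *pkt, size_t len)
{
	unsigned char buf[sizeof(struct tun_pi) + 2048];
	struct tun_pi *pi = (struct tun_pi *)buf;

	if (len > sizeof(buf) - sizeof(*pi))
		return -1;

	pi->flags = 0;
	pi->proto = htons(ETH_P_IP);	/* becomes skb->protocol in tun_get_user() */
	memcpy(buf + sizeof(*pi), pkt, len);

	return write(tun_fd, buf, sizeof(*pi) + len);
}
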
664 static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
665                               unsigned long count, loff_t pos)
666 {
667         struct file *file = iocb->ki_filp;
668         struct tun_struct *tun = tun_get(file);
669         ssize_t result;
670
671         if (!tun)
672                 return -EBADFD;
673
674         DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);
675
676         result = tun_get_user(tun, (struct iovec *)iv, iov_length(iv, count),
677                               file->f_flags & O_NONBLOCK);
678
679         tun_put(tun);
680         return result;
681 }
682
683 /* Put packet to the user space buffer */
684 static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
685                                        struct sk_buff *skb,
686                                        const struct iovec *iv, int len)
687 {
688         struct tun_pi pi = { 0, skb->protocol };
689         ssize_t total = 0;
690
691         if (!(tun->flags & TUN_NO_PI)) {
692                 if ((len -= sizeof(pi)) < 0)
693                         return -EINVAL;
694
695                 if (len < skb->len) {
696                         /* Packet will be stripped */
697                         pi.flags |= TUN_PKT_STRIP;
698                 }
699
700                 if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
701                         return -EFAULT;
702                 total += sizeof(pi);
703         }
704
705         if (tun->flags & TUN_VNET_HDR) {
706                 struct virtio_net_hdr gso = { 0 }; /* no info leak */
707                 if ((len -= sizeof(gso)) < 0)
708                         return -EINVAL;
709
710                 if (skb_is_gso(skb)) {
711                         struct skb_shared_info *sinfo = skb_shinfo(skb);
712
713                         /* This is a hint as to how much should be linear. */
714                         gso.hdr_len = skb_headlen(skb);
715                         gso.gso_size = sinfo->gso_size;
716                         if (sinfo->gso_type & SKB_GSO_TCPV4)
717                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
718                         else if (sinfo->gso_type & SKB_GSO_TCPV6)
719                                 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
720                         else
721                                 BUG();
722                         if (sinfo->gso_type & SKB_GSO_TCP_ECN)
723                                 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
724                 } else
725                         gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
726
727                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
728                         gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
729                         gso.csum_start = skb->csum_start - skb_headroom(skb);
730                         gso.csum_offset = skb->csum_offset;
731                 } /* else everything is zero */
732
733                 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
734                                                sizeof(gso))))
735                         return -EFAULT;
736                 total += sizeof(gso);
737         }
738
739         len = min_t(int, skb->len, len);
740
741         skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
742         total += len;
743
744         tun->dev->stats.tx_packets++;
745         tun->dev->stats.tx_bytes += len;
746
747         return total;
748 }
749
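[Editor's note: tun_put_user() above mirrors that layout on read: a struct tun_pi (with TUN_PKT_STRIP set when the supplied buffer was too small), then a struct virtio_net_hdr if IFF_VNET_HDR is on, then the frame. A minimal read-side sketch under the same assumptions; recv_packet() is a hypothetical helper.]

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/if_tun.h>

/* Hypothetical helper: read one packet from a tun fd opened without IFF_NO_PI
 * and strip the struct tun_pi header that tun_put_user() prepends. */
static ssize_t recv_packet(int tun_fd, unsigned char *pkt, size_t len)
{
	unsigned char buf[sizeof(struct tun_pi) + 2048];
	struct tun_pi *pi = (struct tun_pi *)buf;
	ssize_t n = read(tun_fd, buf, sizeof(buf));

	if (n < (ssize_t)sizeof(*pi))
		return -1;

	if (pi->flags & TUN_PKT_STRIP)
		fprintf(stderr, "packet was truncated to fit the buffer\n");

	n -= sizeof(*pi);
	if ((size_t)n > len)
		n = len;
	memcpy(pkt, buf + sizeof(*pi), n);
	return n;
}
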
750 static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
751                             unsigned long count, loff_t pos)
752 {
753         struct file *file = iocb->ki_filp;
754         struct tun_file *tfile = file->private_data;
755         struct tun_struct *tun = __tun_get(tfile);
756         DECLARE_WAITQUEUE(wait, current);
757         struct sk_buff *skb;
758         ssize_t len, ret = 0;
759
760         if (!tun)
761                 return -EBADFD;
762
763         DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
764
765         len = iov_length(iv, count);
766         if (len < 0) {
767                 ret = -EINVAL;
768                 goto out;
769         }
770
771         add_wait_queue(&tun->socket.wait, &wait);
772         while (len) {
773                 current->state = TASK_INTERRUPTIBLE;
774
775                 /* Read frames from the queue */
776                 if (!(skb=skb_dequeue(&tun->readq))) {
777                         if (file->f_flags & O_NONBLOCK) {
778                                 ret = -EAGAIN;
779                                 break;
780                         }
781                         if (signal_pending(current)) {
782                                 ret = -ERESTARTSYS;
783                                 break;
784                         }
785                         if (tun->dev->reg_state != NETREG_REGISTERED) {
786                                 ret = -EIO;
787                                 break;
788                         }
789
790                         /* Nothing to read, let's sleep */
791                         schedule();
792                         continue;
793                 }
794                 netif_wake_queue(tun->dev);
795
796                 ret = tun_put_user(tun, skb, iv, len);
797                 kfree_skb(skb);
798                 break;
799         }
800
801         current->state = TASK_RUNNING;
802         remove_wait_queue(&tun->socket.wait, &wait);
803
804 out:
805         tun_put(tun);
806         return ret;
807 }
808
809 static void tun_setup(struct net_device *dev)
810 {
811         struct tun_struct *tun = netdev_priv(dev);
812
813         skb_queue_head_init(&tun->readq);
814
815         tun->owner = -1;
816         tun->group = -1;
817
818         dev->ethtool_ops = &tun_ethtool_ops;
819         dev->destructor = tun_free_netdev;
820 }
821
822 /* Trivial set of netlink ops to allow deleting tun or tap
823  * device with netlink.
824  */
825 static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
826 {
827         return -EINVAL;
828 }
829
830 static struct rtnl_link_ops tun_link_ops __read_mostly = {
831         .kind           = DRV_NAME,
832         .priv_size      = sizeof(struct tun_struct),
833         .setup          = tun_setup,
834         .validate       = tun_validate,
835 };
836
837 static void tun_sock_write_space(struct sock *sk)
838 {
839         struct tun_struct *tun;
840
841         if (!sock_writeable(sk))
842                 return;
843
844         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
845                 wake_up_interruptible_sync(sk->sk_sleep);
846
847         if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
848                 return;
849
850         tun = container_of(sk, struct tun_sock, sk)->tun;
851         kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
852 }
853
854 static void tun_sock_destruct(struct sock *sk)
855 {
856         free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
857 }
858
859 static struct proto tun_proto = {
860         .name           = "tun",
861         .owner          = THIS_MODULE,
862         .obj_size       = sizeof(struct tun_sock),
863 };
864
865 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
866 {
867         struct sock *sk;
868         struct tun_struct *tun;
869         struct net_device *dev;
870         int err;
871
872         dev = __dev_get_by_name(net, ifr->ifr_name);
873         if (dev) {
874                 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
875                         tun = netdev_priv(dev);
876                 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
877                         tun = netdev_priv(dev);
878                 else
879                         return -EINVAL;
880
881                 err = tun_attach(tun, file);
882                 if (err < 0)
883                         return err;
884         }
885         else {
886                 char *name;
887                 unsigned long flags = 0;
888
889                 err = -EINVAL;
890
891                 if (!capable(CAP_NET_ADMIN))
892                         return -EPERM;
893
894                 /* Set dev type */
895                 if (ifr->ifr_flags & IFF_TUN) {
896                         /* TUN device */
897                         flags |= TUN_TUN_DEV;
898                         name = "tun%d";
899                 } else if (ifr->ifr_flags & IFF_TAP) {
900                         /* TAP device */
901                         flags |= TUN_TAP_DEV;
902                         name = "tap%d";
903                 } else
904                         goto failed;
905
906                 if (*ifr->ifr_name)
907                         name = ifr->ifr_name;
908
909                 dev = alloc_netdev(sizeof(struct tun_struct), name,
910                                    tun_setup);
911                 if (!dev)
912                         return -ENOMEM;
913
914                 dev_net_set(dev, net);
915                 dev->rtnl_link_ops = &tun_link_ops;
916
917                 tun = netdev_priv(dev);
918                 tun->dev = dev;
919                 tun->flags = flags;
920                 tun->txflt.count = 0;
921
922                 err = -ENOMEM;
923                 sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
924                 if (!sk)
925                         goto err_free_dev;
926
927                 init_waitqueue_head(&tun->socket.wait);
928                 sock_init_data(&tun->socket, sk);
929                 sk->sk_write_space = tun_sock_write_space;
930                 sk->sk_sndbuf = INT_MAX;
931
932                 tun->sk = sk;
933                 container_of(sk, struct tun_sock, sk)->tun = tun;
934
935                 tun_net_init(dev);
936
937                 if (strchr(dev->name, '%')) {
938                         err = dev_alloc_name(dev, dev->name);
939                         if (err < 0)
940                                 goto err_free_sk;
941                 }
942
943                 err = -EINVAL;
944                 err = register_netdevice(tun->dev);
945                 if (err < 0)
946                         goto err_free_sk;
947
948                 sk->sk_destruct = tun_sock_destruct;
949
950                 err = tun_attach(tun, file);
951                 if (err < 0)
952                         goto failed;
953         }
954
955         DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
956
957         if (ifr->ifr_flags & IFF_NO_PI)
958                 tun->flags |= TUN_NO_PI;
959         else
960                 tun->flags &= ~TUN_NO_PI;
961
962         if (ifr->ifr_flags & IFF_ONE_QUEUE)
963                 tun->flags |= TUN_ONE_QUEUE;
964         else
965                 tun->flags &= ~TUN_ONE_QUEUE;
966
967         if (ifr->ifr_flags & IFF_VNET_HDR)
968                 tun->flags |= TUN_VNET_HDR;
969         else
970                 tun->flags &= ~TUN_VNET_HDR;
971
972         /* Make sure persistent devices do not get stuck in
973          * xoff state.
974          */
975         if (netif_running(tun->dev))
976                 netif_wake_queue(tun->dev);
977
978         strcpy(ifr->ifr_name, tun->dev->name);
979         return 0;
980
981  err_free_sk:
982         sock_put(sk);
983  err_free_dev:
984         free_netdev(dev);
985  failed:
986         return err;
987 }
988
989 static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
990 {
991         struct tun_struct *tun = tun_get(file);
992
993         if (!tun)
994                 return -EBADFD;
995
996         DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
997
998         strcpy(ifr->ifr_name, tun->dev->name);
999
1000         ifr->ifr_flags = 0;
1001
1002         if (tun->flags & TUN_TUN_DEV)
1003                 ifr->ifr_flags |= IFF_TUN;
1004         else
1005                 ifr->ifr_flags |= IFF_TAP;
1006
1007         if (tun->flags & TUN_NO_PI)
1008                 ifr->ifr_flags |= IFF_NO_PI;
1009
1010         if (tun->flags & TUN_ONE_QUEUE)
1011                 ifr->ifr_flags |= IFF_ONE_QUEUE;
1012
1013         if (tun->flags & TUN_VNET_HDR)
1014                 ifr->ifr_flags |= IFF_VNET_HDR;
1015
1016         tun_put(tun);
1017         return 0;
1018 }
1019
1020 /* This is like a cut-down ethtool ops, except done via tun fd so no
1021  * privs required. */
1022 static int set_offload(struct net_device *dev, unsigned long arg)
1023 {
1024         unsigned int old_features, features;
1025
1026         old_features = dev->features;
1027         /* Unset features, set them as we chew on the arg. */
1028         features = (old_features & ~(NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST
1029                                     |NETIF_F_TSO_ECN|NETIF_F_TSO|NETIF_F_TSO6));
1030
1031         if (arg & TUN_F_CSUM) {
1032                 features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1033                 arg &= ~TUN_F_CSUM;
1034
1035                 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1036                         if (arg & TUN_F_TSO_ECN) {
1037                                 features |= NETIF_F_TSO_ECN;
1038                                 arg &= ~TUN_F_TSO_ECN;
1039                         }
1040                         if (arg & TUN_F_TSO4)
1041                                 features |= NETIF_F_TSO;
1042                         if (arg & TUN_F_TSO6)
1043                                 features |= NETIF_F_TSO6;
1044                         arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1045                 }
1046         }
1047
1048         /* This gives the user a way to test for new features in future by
1049          * trying to set them. */
1050         if (arg)
1051                 return -EINVAL;
1052
1053         dev->features = features;
1054         if (old_features != dev->features)
1055                 netdev_features_change(dev);
1056
1057         return 0;
1058 }
1059
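[Editor's note: user space drives set_offload() above through the TUNSETOFFLOAD ioctl; since unknown bits return -EINVAL, a caller can probe for features by trying the richest flag set first and falling back. A hedged sketch, assuming a tap fd attached with IFF_VNET_HDR and a hypothetical enable_offloads() helper:]

#include <sys/ioctl.h>
#include <linux/if_tun.h>

/* Hypothetical helper: ask for checksum + TSO offload, falling back to
 * checksum-only if the kernel rejects the TSO flags. */
static int enable_offloads(int tap_fd)
{
	unsigned long flags = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

	if (ioctl(tap_fd, TUNSETOFFLOAD, flags) == 0)
		return 0;

	return ioctl(tap_fd, TUNSETOFFLOAD, (unsigned long)TUN_F_CSUM);
}
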
1060 static int tun_chr_ioctl(struct inode *inode, struct file *file,
1061                          unsigned int cmd, unsigned long arg)
1062 {
1063         struct tun_file *tfile = file->private_data;
1064         struct tun_struct *tun;
1065         void __user* argp = (void __user*)arg;
1066         struct ifreq ifr;
1067         int sndbuf;
1068         int ret;
1069
1070         if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
1071                 if (copy_from_user(&ifr, argp, sizeof ifr))
1072                         return -EFAULT;
1073
1074         if (cmd == TUNGETFEATURES) {
1075                 /* Currently this just means: "what IFF flags are valid?".
1076                  * This is needed because we never checked for invalid flags on
1077                  * TUNSETIFF. */
1078                 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1079                                 IFF_VNET_HDR,
1080                                 (unsigned int __user*)argp);
1081         }
1082
1083         tun = __tun_get(tfile);
1084         if (cmd == TUNSETIFF && !tun) {
1085                 int err;
1086
1087                 ifr.ifr_name[IFNAMSIZ-1] = '\0';
1088
1089                 rtnl_lock();
1090                 err = tun_set_iff(tfile->net, file, &ifr);
1091                 rtnl_unlock();
1092
1093                 if (err)
1094                         return err;
1095
1096                 if (copy_to_user(argp, &ifr, sizeof(ifr)))
1097                         return -EFAULT;
1098                 return 0;
1099         }
1100
1101
1102         if (!tun)
1103                 return -EBADFD;
1104
1105         DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
1106
1107         ret = 0;
1108         switch (cmd) {
1109         case TUNGETIFF:
1110                 ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
1111                 if (ret)
1112                         break;
1113
1114                 if (copy_to_user(argp, &ifr, sizeof(ifr)))
1115                         ret = -EFAULT;
1116                 break;
1117
1118         case TUNSETNOCSUM:
1119                 /* Disable/Enable checksum */
1120                 if (arg)
1121                         tun->flags |= TUN_NOCHECKSUM;
1122                 else
1123                         tun->flags &= ~TUN_NOCHECKSUM;
1124
1125                 DBG(KERN_INFO "%s: checksum %s\n",
1126                     tun->dev->name, arg ? "disabled" : "enabled");
1127                 break;
1128
1129         case TUNSETPERSIST:
1130                 /* Disable/Enable persist mode */
1131                 if (arg)
1132                         tun->flags |= TUN_PERSIST;
1133                 else
1134                         tun->flags &= ~TUN_PERSIST;
1135
1136                 DBG(KERN_INFO "%s: persist %s\n",
1137                     tun->dev->name, arg ? "enabled" : "disabled");
1138                 break;
1139
1140         case TUNSETOWNER:
1141                 /* Set owner of the device */
1142                 tun->owner = (uid_t) arg;
1143
1144                 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner);
1145                 break;
1146
1147         case TUNSETGROUP:
1148                 /* Set group of the device */
1149                 tun->group= (gid_t) arg;
1150
1151                 DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
1152                 break;
1153
1154         case TUNSETLINK:
1155                 /* Only allow setting the type when the interface is down */
1156                 rtnl_lock();
1157                 if (tun->dev->flags & IFF_UP) {
1158                         DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
1159                                 tun->dev->name);
1160                         ret = -EBUSY;
1161                 } else {
1162                         tun->dev->type = (int) arg;
1163                         DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
1164                         ret = 0;
1165                 }
1166                 rtnl_unlock();
1167                 break;
1168
1169 #ifdef TUN_DEBUG
1170         case TUNSETDEBUG:
1171                 tun->debug = arg;
1172                 break;
1173 #endif
1174         case TUNSETOFFLOAD:
1175                 rtnl_lock();
1176                 ret = set_offload(tun->dev, arg);
1177                 rtnl_unlock();
1178                 break;
1179
1180         case TUNSETTXFILTER:
1181                 /* Can be set only for TAPs */
1182                 ret = -EINVAL;
1183                 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1184                         break;
1185                 rtnl_lock();
1186                 ret = update_filter(&tun->txflt, (void __user *)arg);
1187                 rtnl_unlock();
1188                 break;
1189
1190         case SIOCGIFHWADDR:
1191         /* Get hw address */
1192                 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1193                 ifr.ifr_hwaddr.sa_family = tun->dev->type;
1194                 if (copy_to_user(argp, &ifr, sizeof ifr))
1195                         ret = -EFAULT;
1196                 break;
1197
1198         case SIOCSIFHWADDR:
1199                 /* Set hw address */
1200                 DBG(KERN_DEBUG "%s: set hw address: %pM\n",
1201                         tun->dev->name, ifr.ifr_hwaddr.sa_data);
1202
1203                 rtnl_lock();
1204                 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1205                 rtnl_unlock();
1206                 break;
1207
1208         case TUNGETSNDBUF:
1209                 sndbuf = tun->sk->sk_sndbuf;
1210                 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
1211                         ret = -EFAULT;
1212                 break;
1213
1214         case TUNSETSNDBUF:
1215                 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
1216                         ret = -EFAULT;
1217                         break;
1218                 }
1219
1220                 tun->sk->sk_sndbuf = sndbuf;
1221                 break;
1222
1223         default:
1224                 ret = -EINVAL;
1225                 break;
1226         };
1227
1228         tun_put(tun);
1229         return ret;
1230 }
1231
1232 static int tun_chr_fasync(int fd, struct file *file, int on)
1233 {
1234         struct tun_struct *tun = tun_get(file);
1235         int ret;
1236
1237         if (!tun)
1238                 return -EBADFD;
1239
1240         DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
1241
1242         lock_kernel();
1243         if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1244                 goto out;
1245
1246         if (on) {
1247                 ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
1248                 if (ret)
1249                         goto out;
1250                 tun->flags |= TUN_FASYNC;
1251         } else
1252                 tun->flags &= ~TUN_FASYNC;
1253         ret = 0;
1254 out:
1255         unlock_kernel();
1256         tun_put(tun);
1257         return ret;
1258 }
1259
1260 static int tun_chr_open(struct inode *inode, struct file * file)
1261 {
1262         struct tun_file *tfile;
1263         cycle_kernel_lock();
1264         DBG1(KERN_INFO "tunX: tun_chr_open\n");
1265
1266         tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
1267         if (!tfile)
1268                 return -ENOMEM;
1269         atomic_set(&tfile->count, 0);
1270         tfile->tun = NULL;
1271         tfile->net = get_net(current->nsproxy->net_ns);
1272         file->private_data = tfile;
1273         return 0;
1274 }
1275
1276 static int tun_chr_close(struct inode *inode, struct file *file)
1277 {
1278         struct tun_file *tfile = file->private_data;
1279         struct tun_struct *tun = __tun_get(tfile);
1280
1281
1282         if (tun) {
1283                 DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
1284
1285                 rtnl_lock();
1286                 __tun_detach(tun);
1287
1288                 /* If desirable, unregister the netdevice. */
1289                 if (!(tun->flags & TUN_PERSIST))
1290                         unregister_netdevice(tun->dev);
1291
1292                 rtnl_unlock();
1293         }
1294
1295         tun = tfile->tun;
1296         if (tun)
1297                 sock_put(tun->sk);
1298
1299         put_net(tfile->net);
1300         kfree(tfile);
1301
1302         return 0;
1303 }
1304
1305 static const struct file_operations tun_fops = {
1306         .owner  = THIS_MODULE,
1307         .llseek = no_llseek,
1308         .read  = do_sync_read,
1309         .aio_read  = tun_chr_aio_read,
1310         .write = do_sync_write,
1311         .aio_write = tun_chr_aio_write,
1312         .poll   = tun_chr_poll,
1313         .ioctl  = tun_chr_ioctl,
1314         .open   = tun_chr_open,
1315         .release = tun_chr_close,
1316         .fasync = tun_chr_fasync
1317 };
1318
1319 static struct miscdevice tun_miscdev = {
1320         .minor = TUN_MINOR,
1321         .name = "tun",
1322         .fops = &tun_fops,
1323 };
1324
1325 /* ethtool interface */
1326
1327 static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1328 {
1329         cmd->supported          = 0;
1330         cmd->advertising        = 0;
1331         cmd->speed              = SPEED_10;
1332         cmd->duplex             = DUPLEX_FULL;
1333         cmd->port               = PORT_TP;
1334         cmd->phy_address        = 0;
1335         cmd->transceiver        = XCVR_INTERNAL;
1336         cmd->autoneg            = AUTONEG_DISABLE;
1337         cmd->maxtxpkt           = 0;
1338         cmd->maxrxpkt           = 0;
1339         return 0;
1340 }
1341
1342 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1343 {
1344         struct tun_struct *tun = netdev_priv(dev);
1345
1346         strcpy(info->driver, DRV_NAME);
1347         strcpy(info->version, DRV_VERSION);
1348         strcpy(info->fw_version, "N/A");
1349
1350         switch (tun->flags & TUN_TYPE_MASK) {
1351         case TUN_TUN_DEV:
1352                 strcpy(info->bus_info, "tun");
1353                 break;
1354         case TUN_TAP_DEV:
1355                 strcpy(info->bus_info, "tap");
1356                 break;
1357         }
1358 }
1359
1360 static u32 tun_get_msglevel(struct net_device *dev)
1361 {
1362 #ifdef TUN_DEBUG
1363         struct tun_struct *tun = netdev_priv(dev);
1364         return tun->debug;
1365 #else
1366         return -EOPNOTSUPP;
1367 #endif
1368 }
1369
1370 static void tun_set_msglevel(struct net_device *dev, u32 value)
1371 {
1372 #ifdef TUN_DEBUG
1373         struct tun_struct *tun = netdev_priv(dev);
1374         tun->debug = value;
1375 #endif
1376 }
1377
1378 static u32 tun_get_link(struct net_device *dev)
1379 {
1380         struct tun_struct *tun = netdev_priv(dev);
1381         return !!tun->tfile;
1382 }
1383
1384 static u32 tun_get_rx_csum(struct net_device *dev)
1385 {
1386         struct tun_struct *tun = netdev_priv(dev);
1387         return (tun->flags & TUN_NOCHECKSUM) == 0;
1388 }
1389
1390 static int tun_set_rx_csum(struct net_device *dev, u32 data)
1391 {
1392         struct tun_struct *tun = netdev_priv(dev);
1393         if (data)
1394                 tun->flags &= ~TUN_NOCHECKSUM;
1395         else
1396                 tun->flags |= TUN_NOCHECKSUM;
1397         return 0;
1398 }
1399
1400 static const struct ethtool_ops tun_ethtool_ops = {
1401         .get_settings   = tun_get_settings,
1402         .get_drvinfo    = tun_get_drvinfo,
1403         .get_msglevel   = tun_get_msglevel,
1404         .set_msglevel   = tun_set_msglevel,
1405         .get_link       = tun_get_link,
1406         .get_rx_csum    = tun_get_rx_csum,
1407         .set_rx_csum    = tun_set_rx_csum
1408 };
1409
1410
1411 static int __init tun_init(void)
1412 {
1413         int ret = 0;
1414
1415         printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1416         printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
1417
1418         ret = rtnl_link_register(&tun_link_ops);
1419         if (ret) {
1420                 printk(KERN_ERR "tun: Can't register link_ops\n");
1421                 goto err_linkops;
1422         }
1423
1424         ret = misc_register(&tun_miscdev);
1425         if (ret) {
1426                 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
1427                 goto err_misc;
1428         }
1429         return  0;
1430 err_misc:
1431         rtnl_link_unregister(&tun_link_ops);
1432 err_linkops:
1433         return ret;
1434 }
1435
1436 static void tun_cleanup(void)
1437 {
1438         misc_deregister(&tun_miscdev);
1439         rtnl_link_unregister(&tun_link_ops);
1440 }
1441
1442 module_init(tun_init);
1443 module_exit(tun_cleanup);
1444 MODULE_DESCRIPTION(DRV_DESCRIPTION);
1445 MODULE_AUTHOR(DRV_COPYRIGHT);
1446 MODULE_LICENSE("GPL");
1447 MODULE_ALIAS_MISCDEV(TUN_MINOR);
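
[Editor's note: for reference, the usual way to drive this driver from user space is to open the misc device /dev/net/tun and bind the fd to an interface with TUNSETIFF before reading or writing frames. A minimal sketch; the helper name tun_open() and the bare-bones error handling are simplifying assumptions.]

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Hypothetical helper: open the misc device and attach the fd to a tun
 * interface.  With IFF_NO_PI the fd then carries raw IP packets with no
 * struct tun_pi prefix.  Pass an empty name to let the kernel pick "tun%d". */
static int tun_open(char name[IFNAMSIZ])
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
	if (name[0])
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}

	memcpy(name, ifr.ifr_name, IFNAMSIZ);	/* e.g. "tun0" chosen by the kernel */
	return fd;
}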